Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(261)

Side by Side Diff: scripts/slave/runtest.py

Issue 565973005: Revert of Update buildbots to parse new telemetry JSON format. (Closed) Base URL: https://chromium.googlesource.com/chromium/tools/build.git@master
Patch Set: Created 6 years, 3 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch
« no previous file with comments | « scripts/slave/results_dashboard.py ('k') | scripts/slave/telemetry.py » ('j') | no next file with comments »
Toggle Intra-line Diffs ('i') | Expand Comments ('e') | Collapse Comments ('c') | Show Comments Hide Comments ('s')
OLDNEW
1 #!/usr/bin/env python 1 #!/usr/bin/env python
2 # Copyright (c) 2012 The Chromium Authors. All rights reserved. 2 # Copyright (c) 2012 The Chromium Authors. All rights reserved.
3 # Use of this source code is governed by a BSD-style license that can be 3 # Use of this source code is governed by a BSD-style license that can be
4 # found in the LICENSE file. 4 # found in the LICENSE file.
5 5
6 """A tool used to run a Chrome test executable and process the output. 6 """A tool used to run a Chrome test executable and process the output.
7 7
8 This script is used by the buildbot slaves. It must be run from the outer 8 This script is used by the buildbot slaves. It must be run from the outer
9 build directory, e.g. chrome-release/build/. 9 build directory, e.g. chrome-release/build/.
10 10
(...skipping 36 matching lines...) Expand 10 before | Expand all | Expand 10 after
47 # TODO(crbug.com/403564). We almost certainly shouldn't be importing this. 47 # TODO(crbug.com/403564). We almost certainly shouldn't be importing this.
48 import config 48 import config
49 49
50 from slave import annotation_utils 50 from slave import annotation_utils
51 from slave import build_directory 51 from slave import build_directory
52 from slave import crash_utils 52 from slave import crash_utils
53 from slave import gtest_slave_utils 53 from slave import gtest_slave_utils
54 from slave import performance_log_processor 54 from slave import performance_log_processor
55 from slave import results_dashboard 55 from slave import results_dashboard
56 from slave import slave_utils 56 from slave import slave_utils
57 from slave import telemetry_utils
58 from slave import xvfb 57 from slave import xvfb
59 58
60 USAGE = '%s [options] test.exe [test args]' % os.path.basename(sys.argv[0]) 59 USAGE = '%s [options] test.exe [test args]' % os.path.basename(sys.argv[0])
61 60
62 CHROME_SANDBOX_PATH = '/opt/chromium/chrome_sandbox' 61 CHROME_SANDBOX_PATH = '/opt/chromium/chrome_sandbox'
63 62
64 # Directory to write JSON for test results into. 63 # Directory to write JSON for test results into.
65 DEST_DIR = 'gtest_results' 64 DEST_DIR = 'gtest_results'
66 65
67 # Names of httpd configuration file under different platforms. 66 # Names of httpd configuration file under different platforms.
(...skipping 474 matching lines...) Expand 10 before | Expand all | Expand 10 after
542 shouldlist = selection and selection == 'list' 541 shouldlist = selection and selection == 'list'
543 if shouldlist: 542 if shouldlist:
544 print 543 print
545 print 'Available log processors:' 544 print 'Available log processors:'
546 for p in LOG_PROCESSOR_CLASSES: 545 for p in LOG_PROCESSOR_CLASSES:
547 print ' ', p, LOG_PROCESSOR_CLASSES[p].__name__ 546 print ' ', p, LOG_PROCESSOR_CLASSES[p].__name__
548 547
549 return shouldlist 548 return shouldlist
550 549
551 550
552 def _SelectLogProcessor(options, test_exe): 551 def _SelectLogProcessor(options):
553 """Returns a log processor class based on the command line options. 552 """Returns a log processor class based on the command line options.
554 553
555 Args: 554 Args:
556 options: Command-line options (from OptionParser). 555 options: Command-line options (from OptionParser).
557 test_exe: Name of the test to execute
558 556
559 Returns: 557 Returns:
560 A log processor class, or None. 558 A log processor class, or None.
561 """ 559 """
562 if _UsingGtestJson(options): 560 if _UsingGtestJson(options):
563 return gtest_utils.GTestJSONParser 561 return gtest_utils.GTestJSONParser
564 562
565 if test_exe and test_exe.endswith('telemetry.py'):
566 return telemetry_utils.TelemetryResultsTracker
567
568 if options.annotate: 563 if options.annotate:
569 if options.annotate in LOG_PROCESSOR_CLASSES: 564 if options.annotate in LOG_PROCESSOR_CLASSES:
570 if options.generate_json_file and options.annotate != 'gtest': 565 if options.generate_json_file and options.annotate != 'gtest':
571 raise NotImplementedError('"%s" doesn\'t make sense with ' 566 raise NotImplementedError('"%s" doesn\'t make sense with '
572 'options.generate_json_file.') 567 'options.generate_json_file.')
573 else: 568 else:
574 return LOG_PROCESSOR_CLASSES[options.annotate] 569 return LOG_PROCESSOR_CLASSES[options.annotate]
575 else: 570 else:
576 raise KeyError('"%s" is not a valid GTest parser!' % options.annotate) 571 raise KeyError('"%s" is not a valid GTest parser!' % options.annotate)
577 elif options.generate_json_file: 572 elif options.generate_json_file:
578 return LOG_PROCESSOR_CLASSES['gtest'] 573 return LOG_PROCESSOR_CLASSES['gtest']
579 574
580 return None 575 return None
581 576
582 577
583 def _GetCommitPos(build_properties): 578 def _GetCommitPos(build_properties):
584   """Extracts the commit position from the build properties, if it's there.""" 579   """Extracts the commit position from the build properties, if it's there."""
585 if 'got_revision_cp' not in build_properties: 580 if 'got_revision_cp' not in build_properties:
586 return None 581 return None
587 commit_pos = build_properties['got_revision_cp'] 582 commit_pos = build_properties['got_revision_cp']
588 return int(re.search(r'{#(\d+)}', commit_pos).group(1)) 583 return int(re.search(r'{#(\d+)}', commit_pos).group(1))
589 584
590 585
591 def _GetMainRevision(options):
592 build_dir = os.path.abspath(options.build_dir)
593 commit_pos_num = _GetCommitPos(options.build_properties)
594 if commit_pos_num is not None:
595 revision = commit_pos_num
596 elif options.revision:
597 revision = options.revision
598 else:
599 revision = _GetRevision(os.path.dirname(build_dir))
600 return revision
601
602
603 def _GetBlinkRevision(options):
604 build_dir = os.path.abspath(options.build_dir)
605
606 if options.webkit_revision:
607 webkit_revision = options.webkit_revision
608 else:
609 try:
610 webkit_dir = chromium_utils.FindUpward(
611 build_dir, 'third_party', 'WebKit', 'Source')
612 webkit_revision = _GetRevision(webkit_dir)
613 except Exception:
614 webkit_revision = None
615 return webkit_revision
616
617
618 def _GetTelemetryRevisions(options):
619 """Fills in the same revisions fields that process_log_utils does."""
620
621 versions = {}
622 versions['rev'] = _GetMainRevision(options)
623 versions['webkit_rev'] = _GetBlinkRevision(options)
624 versions['webrtc_rev'] = options.build_properties.get('got_webrtc_revision')
625 versions['v8_rev'] = options.build_properties.get('got_v8_revision')
626 versions['ver'] = options.build_properties.get('version')
627 versions['git_revision'] = options.build_properties.get('git_revision')
628 return versions
629
630
631 def _CreateLogProcessor(log_processor_class, options): 586 def _CreateLogProcessor(log_processor_class, options):
632 """Creates a log processor instance. 587 """Creates a log processor instance.
633 588
634 Args: 589 Args:
635 log_processor_class: A subclass of PerformanceLogProcessor or similar class. 590 log_processor_class: A subclass of PerformanceLogProcessor or similar class.
636 options: Command-line options (from OptionParser). 591 options: Command-line options (from OptionParser).
637 592
638 Returns: 593 Returns:
639 An instance of a log processor class, or None. 594 An instance of a log processor class, or None.
640 """ 595 """
641 if not log_processor_class: 596 if not log_processor_class:
642 return None 597 return None
643 598
644 if log_processor_class.__name__ in ('GTestLogParser', 599 if log_processor_class.__name__ in ('GTestLogParser',):
645 'TelemetryResultsTracker'):
646 tracker_obj = log_processor_class() 600 tracker_obj = log_processor_class()
647 elif log_processor_class.__name__ in ('GTestJSONParser',): 601 elif log_processor_class.__name__ in ('GTestJSONParser',):
648 tracker_obj = log_processor_class( 602 tracker_obj = log_processor_class(
649 options.build_properties.get('mastername')) 603 options.build_properties.get('mastername'))
650 else: 604 else:
651 webkit_revision = _GetBlinkRevision(options) or 'undefined' 605 build_dir = os.path.abspath(options.build_dir)
652 revision = _GetMainRevision(options) or 'undefined' 606
607 if options.webkit_revision:
608 webkit_revision = options.webkit_revision
609 else:
610 try:
611 webkit_dir = chromium_utils.FindUpward(
612 build_dir, 'third_party', 'WebKit', 'Source')
613 webkit_revision = _GetRevision(webkit_dir)
614 except Exception:
615 webkit_revision = 'undefined'
616
617 commit_pos_num = _GetCommitPos(options.build_properties)
618 if commit_pos_num is not None:
619 revision = commit_pos_num
620 elif options.revision:
621 revision = options.revision
622 else:
623 revision = _GetRevision(os.path.dirname(build_dir))
653 624
654 tracker_obj = log_processor_class( 625 tracker_obj = log_processor_class(
655 revision=revision, 626 revision=revision,
656 build_properties=options.build_properties, 627 build_properties=options.build_properties,
657 factory_properties=options.factory_properties, 628 factory_properties=options.factory_properties,
658 webkit_revision=webkit_revision) 629 webkit_revision=webkit_revision)
659 630
660 if options.annotate and options.generate_json_file: 631 if options.annotate and options.generate_json_file:
661 tracker_obj.ProcessLine(_GetMasterString(_GetMaster())) 632 tracker_obj.ProcessLine(_GetMasterString(_GetMaster()))
662 633
(...skipping 14 matching lines...) Expand all
677 supplemental_columns = {} 648 supplemental_columns = {}
678 supplemental_columns_file = os.path.join(build_dir, 649 supplemental_columns_file = os.path.join(build_dir,
679 results_dashboard.CACHE_DIR, 650 results_dashboard.CACHE_DIR,
680 supplemental_colummns_file_name) 651 supplemental_colummns_file_name)
681 if os.path.exists(supplemental_columns_file): 652 if os.path.exists(supplemental_columns_file):
682 with file(supplemental_columns_file, 'r') as f: 653 with file(supplemental_columns_file, 'r') as f:
683 supplemental_columns = json.loads(f.read()) 654 supplemental_columns = json.loads(f.read())
684 return supplemental_columns 655 return supplemental_columns
685 656
686 657
687 def _ResultsDashboardDict(options): 658 def _SendResultsToDashboard(log_processor, system, test, url, build_dir,
688 """Generates a dict of info needed by the results dashboard. 659 mastername, buildername, buildnumber,
689 660 supplemental_columns_file, extra_columns=None):
690 Args:
691 options: Program arguments.
692
693 Returns:
694 dict containing data the dashboard needs.
695 """
696 build_dir = os.path.abspath(options.build_dir)
697 supplemental_columns = _GetSupplementalColumns(
698 build_dir, options.supplemental_columns_file)
699 extra_columns = options.perf_config
700 if extra_columns:
701 supplemental_columns.update(extra_columns)
702 fields = {
703 'system': _GetPerfID(options),
704 'test': options.test_type,
705 'url': options.results_url,
706 'mastername': options.build_properties.get('mastername'),
707 'buildername': options.build_properties.get('buildername'),
708 'buildnumber': options.build_properties.get('buildnumber'),
709 'build_dir': build_dir,
710 'supplemental_columns': supplemental_columns,
711 'revisions': _GetTelemetryRevisions(options),
712 }
713 return fields
714
715
716 def _SendResultsToDashboard(log_processor, args):
717 """Sends results from a log processor instance to the dashboard. 661 """Sends results from a log processor instance to the dashboard.
718 662
719 Args: 663 Args:
720 log_processor: An instance of a log processor class, which has been used to 664 log_processor: An instance of a log processor class, which has been used to
721 process the test output, so it contains the test results. 665 process the test output, so it contains the test results.
722 args: Dict of additional args to send to results_dashboard. 666 system: A string such as 'linux-release', which comes from perf_id.
667 test: Test "suite" name string.
668 url: Dashboard URL.
669 build_dir: Build dir name (used for cache file by results_dashboard).
670 mastername: Buildbot master name, e.g. 'chromium.perf'.
671 WARNING! This is incorrectly called "masterid" in some parts of the
672 dashboard code.
673 buildername: Builder name, e.g. 'Linux QA Perf (1)'
674 buildnumber: Build number (as a string).
675 supplemental_columns_file: Filename for JSON supplemental columns file.
676 extra_columns: A dict of extra values to add to the supplemental columns
677 dict.
723 """ 678 """
724 if args['system'] is None: 679 if system is None:
725 # perf_id not specified in factory properties. 680 # perf_id not specified in factory properties.
726 print 'Error: No system name (perf_id) specified when sending to dashboard.' 681 print 'Error: No system name (perf_id) specified when sending to dashboard.'
727 return 682 return
683 supplemental_columns = _GetSupplementalColumns(
684 build_dir, supplemental_columns_file)
685 if extra_columns:
686 supplemental_columns.update(extra_columns)
728 687
729 results = [] 688 charts = _GetDataFromLogProcessor(log_processor)
730 if log_processor.IsChartJson(): 689 points = results_dashboard.MakeListOfPoints(
731 results = [results_dashboard.MakeDashboardJsonV1( 690 charts, system, test, mastername, buildername, buildnumber,
732 log_processor.ChartJson(), 691 supplemental_columns)
733 args['revisions'], args['system'], args['mastername'], 692 results_dashboard.SendResults(points, url, build_dir)
734 args['buildername'], args['buildnumber'],
735 args['supplemental_columns'], False)]
736 ref_json = log_processor.RefJson()
737 if ref_json:
738 results.append(results_dashboard.MakeDashboardJsonV1(
739 ref_json, args['revisions'], args['system'], args['mastername'],
740 args['buildername'], args['buildnumber'],
741 args['supplemental_columns'], True))
742 log_processor.Cleanup()
743 else:
744 charts = _GetDataFromLogProcessor(log_processor)
745 results = [results_dashboard.MakeListOfPoints(
746 charts, args['system'], args['test'], args['mastername'],
747 args['buildername'], args['buildnumber'], args['supplemental_columns'])]
748 for result in results:
749 results_dashboard.SendResults(result, args['url'], args['build_dir'])
750 693
751 694
752 def _GetDataFromLogProcessor(log_processor): 695 def _GetDataFromLogProcessor(log_processor):
753 """Returns a mapping of chart names to chart data. 696 """Returns a mapping of chart names to chart data.
754 697
755 Args: 698 Args:
756 log_processor: A log processor (aka results tracker) object. 699 log_processor: A log processor (aka results tracker) object.
757 700
758 Returns: 701 Returns:
759 A dictionary mapping chart name to lists of chart data. 702 A dictionary mapping chart name to lists of chart data.
(...skipping 247 matching lines...) Expand 10 before | Expand all | Expand 10 after
1007 through the specified annotation parser (aka log processor). 950 through the specified annotation parser (aka log processor).
1008 """ 951 """
1009 if not options.annotate: 952 if not options.annotate:
1010 raise chromium_utils.MissingArgument('--parse-input doesn\'t make sense ' 953 raise chromium_utils.MissingArgument('--parse-input doesn\'t make sense '
1011 'without --annotate.') 954 'without --annotate.')
1012 955
1013 # If --annotate=list was passed, list the log processor classes and exit. 956 # If --annotate=list was passed, list the log processor classes and exit.
1014 if _ListLogProcessors(options.annotate): 957 if _ListLogProcessors(options.annotate):
1015 return 0 958 return 0
1016 959
1017 log_processor_class = _SelectLogProcessor(options, None) 960 log_processor_class = _SelectLogProcessor(options)
1018 log_processor = _CreateLogProcessor(log_processor_class, options) 961 log_processor = _CreateLogProcessor(log_processor_class, options)
1019 962
1020 if options.generate_json_file: 963 if options.generate_json_file:
1021 if os.path.exists(options.test_output_xml): 964 if os.path.exists(options.test_output_xml):
1022 # remove the old XML output file. 965 # remove the old XML output file.
1023 os.remove(options.test_output_xml) 966 os.remove(options.test_output_xml)
1024 967
1025 if options.parse_input == '-': 968 if options.parse_input == '-':
1026 f = sys.stdin 969 f = sys.stdin
1027 else: 970 else:
(...skipping 46 matching lines...) Expand 10 before | Expand all | Expand 10 after
1074 command = [sys.executable, test_exe] 1017 command = [sys.executable, test_exe]
1075 else: 1018 else:
1076 command = [test_exe_path] 1019 command = [test_exe_path]
1077 if options.annotate == 'gtest': 1020 if options.annotate == 'gtest':
1078 command.extend(['--brave-new-test-launcher', '--test-launcher-bot-mode']) 1021 command.extend(['--brave-new-test-launcher', '--test-launcher-bot-mode'])
1079 command.extend(args[1:]) 1022 command.extend(args[1:])
1080 1023
1081 # If --annotate=list was passed, list the log processor classes and exit. 1024 # If --annotate=list was passed, list the log processor classes and exit.
1082 if _ListLogProcessors(options.annotate): 1025 if _ListLogProcessors(options.annotate):
1083 return 0 1026 return 0
1084 log_processor_class = _SelectLogProcessor(options, test_exe) 1027 log_processor_class = _SelectLogProcessor(options)
1085 log_processor = _CreateLogProcessor(log_processor_class, options) 1028 log_processor = _CreateLogProcessor(log_processor_class, options)
1086 if hasattr(log_processor, 'IsChartJson') and log_processor.IsChartJson():
1087 command.extend(log_processor.GetArguments())
1088 1029
1089 if options.generate_json_file: 1030 if options.generate_json_file:
1090 if os.path.exists(options.test_output_xml): 1031 if os.path.exists(options.test_output_xml):
1091 # remove the old XML output file. 1032 # remove the old XML output file.
1092 os.remove(options.test_output_xml) 1033 os.remove(options.test_output_xml)
1093 1034
1094 try: 1035 try:
1095 http_server = None 1036 http_server = None
1096 if options.document_root: 1037 if options.document_root:
1097 http_server = _StartHttpServer('mac', build_dir=build_dir, 1038 http_server = _StartHttpServer('mac', build_dir=build_dir,
(...skipping 28 matching lines...) Expand all
1126 if not _GenerateJSONForTestResults(options, log_processor): 1067 if not _GenerateJSONForTestResults(options, log_processor):
1127 return 1 1068 return 1
1128 1069
1129 if options.annotate: 1070 if options.annotate:
1130 annotation_utils.annotate( 1071 annotation_utils.annotate(
1131 options.test_type, result, log_processor, 1072 options.test_type, result, log_processor,
1132 options.factory_properties.get('full_test_name'), 1073 options.factory_properties.get('full_test_name'),
1133 perf_dashboard_id=options.perf_dashboard_id) 1074 perf_dashboard_id=options.perf_dashboard_id)
1134 1075
1135 if options.results_url: 1076 if options.results_url:
1136 _SendResultsToDashboard(log_processor, _ResultsDashboardDict(options)) 1077 _SendResultsToDashboard(
1078 log_processor, _GetPerfID(options),
1079 options.test_type, options.results_url, options.build_dir,
1080 options.build_properties.get('mastername'),
1081 options.build_properties.get('buildername'),
1082 options.build_properties.get('buildnumber'),
1083 options.supplemental_columns_file,
1084 options.perf_config)
1137 1085
1138 return result 1086 return result
1139 1087
1140 1088
1141 def _MainIOS(options, args, extra_env): 1089 def _MainIOS(options, args, extra_env):
1142 """Runs the test on iOS.""" 1090 """Runs the test on iOS."""
1143 if len(args) < 1: 1091 if len(args) < 1:
1144 raise chromium_utils.MissingArgument('Usage: %s' % USAGE) 1092 raise chromium_utils.MissingArgument('Usage: %s' % USAGE)
1145 1093
1146 def kill_simulator(): 1094 def kill_simulator():
(...skipping 175 matching lines...) Expand 10 before | Expand all | Expand 10 after
1322 command = [sys.executable, test_exe] 1270 command = [sys.executable, test_exe]
1323 else: 1271 else:
1324 command = [test_exe_path] 1272 command = [test_exe_path]
1325 if options.annotate == 'gtest': 1273 if options.annotate == 'gtest':
1326 command.extend(['--brave-new-test-launcher', '--test-launcher-bot-mode']) 1274 command.extend(['--brave-new-test-launcher', '--test-launcher-bot-mode'])
1327 command.extend(args[1:]) 1275 command.extend(args[1:])
1328 1276
1329 # If --annotate=list was passed, list the log processor classes and exit. 1277 # If --annotate=list was passed, list the log processor classes and exit.
1330 if _ListLogProcessors(options.annotate): 1278 if _ListLogProcessors(options.annotate):
1331 return 0 1279 return 0
1332 log_processor_class = _SelectLogProcessor(options, test_exe) 1280 log_processor_class = _SelectLogProcessor(options)
1333 log_processor = _CreateLogProcessor(log_processor_class, options) 1281 log_processor = _CreateLogProcessor(log_processor_class, options)
1334 if hasattr(log_processor, 'IsChartJson') and log_processor.IsChartJson():
1335 command.extend(log_processor.GetArguments())
1336 1282
1337 if options.generate_json_file: 1283 if options.generate_json_file:
1338 if os.path.exists(options.test_output_xml): 1284 if os.path.exists(options.test_output_xml):
1339 # remove the old XML output file. 1285 # remove the old XML output file.
1340 os.remove(options.test_output_xml) 1286 os.remove(options.test_output_xml)
1341 1287
1342 try: 1288 try:
1343 start_xvfb = False 1289 start_xvfb = False
1344 http_server = None 1290 http_server = None
1345 json_file_name = None 1291 json_file_name = None
(...skipping 51 matching lines...) Expand 10 before | Expand all | Expand 10 after
1397 if not _GenerateJSONForTestResults(options, log_processor): 1343 if not _GenerateJSONForTestResults(options, log_processor):
1398 return 1 1344 return 1
1399 1345
1400 if options.annotate: 1346 if options.annotate:
1401 annotation_utils.annotate( 1347 annotation_utils.annotate(
1402 options.test_type, result, log_processor, 1348 options.test_type, result, log_processor,
1403 options.factory_properties.get('full_test_name'), 1349 options.factory_properties.get('full_test_name'),
1404 perf_dashboard_id=options.perf_dashboard_id) 1350 perf_dashboard_id=options.perf_dashboard_id)
1405 1351
1406 if options.results_url: 1352 if options.results_url:
1407 _SendResultsToDashboard(log_processor, _ResultsDashboardDict(options)) 1353 _SendResultsToDashboard(
1354 log_processor, _GetPerfID(options),
1355 options.test_type, options.results_url, options.build_dir,
1356 options.build_properties.get('mastername'),
1357 options.build_properties.get('buildername'),
1358 options.build_properties.get('buildnumber'),
1359 options.supplemental_columns_file,
1360 options.perf_config)
1408 1361
1409 return result 1362 return result
1410 1363
1411 1364
1412 def _MainWin(options, args, extra_env): 1365 def _MainWin(options, args, extra_env):
1413 """Runs tests on windows. 1366 """Runs tests on windows.
1414 1367
1415 Using the target build configuration, run the executable given in the 1368 Using the target build configuration, run the executable given in the
1416 first non-option argument, passing any following arguments to that 1369 first non-option argument, passing any following arguments to that
1417 executable. 1370 executable.
(...skipping 48 matching lines...) Expand 10 before | Expand all | Expand 10 after
1466 '--'] + command 1419 '--'] + command
1467 command.extend(args[1:]) 1420 command.extend(args[1:])
1468 1421
1469 # Nuke anything that appears to be stale chrome items in the temporary 1422 # Nuke anything that appears to be stale chrome items in the temporary
1470 # directory from previous test runs (i.e.- from crashes or unittest leaks). 1423 # directory from previous test runs (i.e.- from crashes or unittest leaks).
1471 slave_utils.RemoveChromeTemporaryFiles() 1424 slave_utils.RemoveChromeTemporaryFiles()
1472 1425
1473 # If --annotate=list was passed, list the log processor classes and exit. 1426 # If --annotate=list was passed, list the log processor classes and exit.
1474 if _ListLogProcessors(options.annotate): 1427 if _ListLogProcessors(options.annotate):
1475 return 0 1428 return 0
1476 log_processor_class = _SelectLogProcessor(options, test_exe) 1429 log_processor_class = _SelectLogProcessor(options)
1477 log_processor = _CreateLogProcessor(log_processor_class, options) 1430 log_processor = _CreateLogProcessor(log_processor_class, options)
1478 if hasattr(log_processor, 'IsChartJson') and log_processor.IsChartJson():
1479 command.extend(log_processor.GetArguments())
1480 1431
1481 if options.generate_json_file: 1432 if options.generate_json_file:
1482 if os.path.exists(options.test_output_xml): 1433 if os.path.exists(options.test_output_xml):
1483 # remove the old XML output file. 1434 # remove the old XML output file.
1484 os.remove(options.test_output_xml) 1435 os.remove(options.test_output_xml)
1485 1436
1486 try: 1437 try:
1487 http_server = None 1438 http_server = None
1488 if options.document_root: 1439 if options.document_root:
1489 http_server = _StartHttpServer('win', build_dir=build_dir, 1440 http_server = _StartHttpServer('win', build_dir=build_dir,
(...skipping 24 matching lines...) Expand all
1514 if not _GenerateJSONForTestResults(options, log_processor): 1465 if not _GenerateJSONForTestResults(options, log_processor):
1515 return 1 1466 return 1
1516 1467
1517 if options.annotate: 1468 if options.annotate:
1518 annotation_utils.annotate( 1469 annotation_utils.annotate(
1519 options.test_type, result, log_processor, 1470 options.test_type, result, log_processor,
1520 options.factory_properties.get('full_test_name'), 1471 options.factory_properties.get('full_test_name'),
1521 perf_dashboard_id=options.perf_dashboard_id) 1472 perf_dashboard_id=options.perf_dashboard_id)
1522 1473
1523 if options.results_url: 1474 if options.results_url:
1524 _SendResultsToDashboard(log_processor, _ResultsDashboardDict(options)) 1475 _SendResultsToDashboard(
1476 log_processor, _GetPerfID(options),
1477 options.test_type, options.results_url, options.build_dir,
1478 options.build_properties.get('mastername'),
1479 options.build_properties.get('buildername'),
1480 options.build_properties.get('buildnumber'),
1481 options.supplemental_columns_file,
1482 options.perf_config)
1525 1483
1526 return result 1484 return result
1527 1485
1528 1486
1529 def _MainAndroid(options, args, extra_env): 1487 def _MainAndroid(options, args, extra_env):
1530 """Runs tests on android. 1488 """Runs tests on android.
1531 1489
1532 Running GTest-based tests on android is different than on Linux as it requires 1490 Running GTest-based tests on android is different than on Linux as it requires
1533 src/build/android/test_runner.py to deploy and communicate with the device. 1491 src/build/android/test_runner.py to deploy and communicate with the device.
1534 Python scripts are the same as with Linux. 1492 Python scripts are the same as with Linux.
1535 1493
1536 Args: 1494 Args:
1537 options: Command-line options for this invocation of runtest.py. 1495 options: Command-line options for this invocation of runtest.py.
1538 args: Command and arguments for the test. 1496 args: Command and arguments for the test.
1539 extra_env: A dictionary of extra environment variables to set. 1497 extra_env: A dictionary of extra environment variables to set.
1540 1498
1541 Returns: 1499 Returns:
1542 Exit status code. 1500 Exit status code.
1543 """ 1501 """
1544 if options.run_python_script: 1502 if options.run_python_script:
1545 return _MainLinux(options, args, extra_env) 1503 return _MainLinux(options, args, extra_env)
1546 1504
1547 if len(args) < 1: 1505 if len(args) < 1:
1548 raise chromium_utils.MissingArgument('Usage: %s' % USAGE) 1506 raise chromium_utils.MissingArgument('Usage: %s' % USAGE)
1549 1507
1550 if _ListLogProcessors(options.annotate): 1508 if _ListLogProcessors(options.annotate):
1551 return 0 1509 return 0
1552 log_processor_class = _SelectLogProcessor(options, args[0]) 1510 log_processor_class = _SelectLogProcessor(options)
1553 log_processor = _CreateLogProcessor(log_processor_class, options) 1511 log_processor = _CreateLogProcessor(log_processor_class, options)
1554 1512
1555 if options.generate_json_file: 1513 if options.generate_json_file:
1556 if os.path.exists(options.test_output_xml): 1514 if os.path.exists(options.test_output_xml):
1557 # remove the old XML output file. 1515 # remove the old XML output file.
1558 os.remove(options.test_output_xml) 1516 os.remove(options.test_output_xml)
1559 1517
1560 # Assume it's a gtest apk, so use the android harness. 1518 # Assume it's a gtest apk, so use the android harness.
1561 test_suite = args[0] 1519 test_suite = args[0]
1562 run_test_target_option = '--release' 1520 run_test_target_option = '--release'
1563 if options.target == 'Debug': 1521 if options.target == 'Debug':
1564 run_test_target_option = '--debug' 1522 run_test_target_option = '--debug'
1565 command = ['src/build/android/test_runner.py', 'gtest', 1523 command = ['src/build/android/test_runner.py', 'gtest',
1566 run_test_target_option, '-s', test_suite] 1524 run_test_target_option, '-s', test_suite]
1567 result = _RunGTestCommand(command, extra_env, log_processor=log_processor) 1525 result = _RunGTestCommand(command, extra_env, log_processor=log_processor)
1568 1526
1569 if options.generate_json_file: 1527 if options.generate_json_file:
1570 if not _GenerateJSONForTestResults(options, log_processor): 1528 if not _GenerateJSONForTestResults(options, log_processor):
1571 return 1 1529 return 1
1572 1530
1573 if options.annotate: 1531 if options.annotate:
1574 annotation_utils.annotate( 1532 annotation_utils.annotate(
1575 options.test_type, result, log_processor, 1533 options.test_type, result, log_processor,
1576 options.factory_properties.get('full_test_name'), 1534 options.factory_properties.get('full_test_name'),
1577 perf_dashboard_id=options.perf_dashboard_id) 1535 perf_dashboard_id=options.perf_dashboard_id)
1578 1536
1579 if options.results_url: 1537 if options.results_url:
1580 _SendResultsToDashboard(log_processor, _ResultsDashboardDict(options)) 1538 _SendResultsToDashboard(
1539 log_processor, _GetPerfID(options),
1540 options.test_type, options.results_url, options.build_dir,
1541 options.build_properties.get('mastername'),
1542 options.build_properties.get('buildername'),
1543 options.build_properties.get('buildnumber'),
1544 options.supplemental_columns_file,
1545 options.perf_config)
1581 1546
1582 return result 1547 return result
1583 1548
1584 1549
1585 def main(): 1550 def main():
1586 """Entry point for runtest.py. 1551 """Entry point for runtest.py.
1587 1552
1588 This function: 1553 This function:
1589 (1) Sets up the command-line options. 1554 (1) Sets up the command-line options.
1590 (2) Sets environment variables based on those options. 1555 (2) Sets environment variables based on those options.
(...skipping 387 matching lines...) Expand 10 before | Expand all | Expand 10 after
1978 finally: 1943 finally:
1979 if did_launch_dbus: 1944 if did_launch_dbus:
1980 # It looks like the command line argument --exit-with-session 1945 # It looks like the command line argument --exit-with-session
1981 # isn't working to clean up the spawned dbus-daemon. Kill it 1946 # isn't working to clean up the spawned dbus-daemon. Kill it
1982 # manually. 1947 # manually.
1983 _ShutdownDBus() 1948 _ShutdownDBus()
1984 1949
1985 1950
1986 if '__main__' == __name__: 1951 if '__main__' == __name__:
1987 sys.exit(main()) 1952 sys.exit(main())
OLDNEW
« no previous file with comments | « scripts/slave/results_dashboard.py ('k') | scripts/slave/telemetry.py » ('j') | no next file with comments »

Powered by Google App Engine
This is Rietveld 408576698