| OLD | NEW |
| 1 #!/usr/bin/env python | 1 #!/usr/bin/env python |
| 2 # Copyright (c) 2012 The Chromium Authors. All rights reserved. | 2 # Copyright (c) 2012 The Chromium Authors. All rights reserved. |
| 3 # Use of this source code is governed by a BSD-style license that can be | 3 # Use of this source code is governed by a BSD-style license that can be |
| 4 # found in the LICENSE file. | 4 # found in the LICENSE file. |
| 5 | 5 |
| 6 """A tool used to run a Chrome test executable and process the output. | 6 """A tool used to run a Chrome test executable and process the output. |
| 7 | 7 |
| 8 This script is used by the buildbot slaves. It must be run from the outer | 8 This script is used by the buildbot slaves. It must be run from the outer |
| 9 build directory, e.g. chrome-release/build/. | 9 build directory, e.g. chrome-release/build/. |
| 10 | 10 |
| (...skipping 683 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 694 'mastername': options.build_properties.get('mastername'), | 694 'mastername': options.build_properties.get('mastername'), |
| 695 'buildername': options.build_properties.get('buildername'), | 695 'buildername': options.build_properties.get('buildername'), |
| 696 'buildnumber': options.build_properties.get('buildnumber'), | 696 'buildnumber': options.build_properties.get('buildnumber'), |
| 697 'build_dir': build_dir, | 697 'build_dir': build_dir, |
| 698 'supplemental_columns': supplemental_columns, | 698 'supplemental_columns': supplemental_columns, |
| 699 'revisions': _GetTelemetryRevisions(options), | 699 'revisions': _GetTelemetryRevisions(options), |
| 700 } | 700 } |
| 701 return fields | 701 return fields |
| 702 | 702 |
| 703 | 703 |
| 704 def _GenerateDashboardJson(log_processor, args): |
| 705 """Generates chartjson to send to the dashboard. |
| 706 |
| 707 Args: |
| 708 log_processor: An instance of a log processor class, which has been used to |
| 709 process the test output, so it contains the test results. |
| 710 args: Dict of additional args to send to results_dashboard. |
| 711 """ |
| 712 assert log_processor.IsChartJson() |
| 713 |
| 714 chart_json = log_processor.ChartJson() |
| 715 if chart_json: |
| 716 return results_dashboard.MakeDashboardJsonV1( |
| 717 chart_json, |
| 718 args['revisions'], args['system'], args['mastername'], |
| 719 args['buildername'], args['buildnumber'], |
| 720 args['supplemental_columns'], log_processor.IsReferenceBuild()) |
| 721 return None |
| 722 |
| 723 |
def _WriteChartJsonToOutput(chartjson_file, log_processor, args):
  """Dumps the dashboard chartjson to a file for display in the waterfall.

  Args:
    chartjson_file: Path of the file to write the chartjson to.
    log_processor: An instance of a log processor class, which has been used
        to process the test output, so it contains the test results.
    args: Dict of additional args to send to results_dashboard.
  """
  assert log_processor.IsChartJson()

  dashboard_json = _GenerateDashboardJson(log_processor, args)
  # NOTE(review): when there is no chartjson output this serializes None,
  # i.e. the file contains the literal "null" -- presumably readers of the
  # file handle that; confirm against the waterfall consumer.
  with open(chartjson_file, 'w') as output_file:
    json.dump(dashboard_json, output_file)
| 740 |
| 704 def _SendResultsToDashboard(log_processor, args): | 741 def _SendResultsToDashboard(log_processor, args): |
| 705 """Sends results from a log processor instance to the dashboard. | 742 """Sends results from a log processor instance to the dashboard. |
| 706 | 743 |
| 707 Args: | 744 Args: |
| 708 log_processor: An instance of a log processor class, which has been used to | 745 log_processor: An instance of a log processor class, which has been used to |
| 709 process the test output, so it contains the test results. | 746 process the test output, so it contains the test results. |
| 710 args: Dict of additional args to send to results_dashboard. | 747 args: Dict of additional args to send to results_dashboard. |
| 711 """ | 748 """ |
| 712 if args['system'] is None: | 749 if args['system'] is None: |
| 713 # perf_id not specified in factory properties. | 750 # perf_id not specified in factory properties. |
| 714 print 'Error: No system name (perf_id) specified when sending to dashboard.' | 751 print 'Error: No system name (perf_id) specified when sending to dashboard.' |
| 715 return | 752 return |
| 716 | 753 |
| 717 results = None | 754 results = None |
| 718 if log_processor.IsChartJson(): | 755 if log_processor.IsChartJson(): |
| 719 chart_json = log_processor.ChartJson() | 756 results = _GenerateDashboardJson(log_processor, args) |
| 720 if chart_json: | 757 if not results: |
| 721 results = results_dashboard.MakeDashboardJsonV1( | |
| 722 chart_json, | |
| 723 args['revisions'], args['system'], args['mastername'], | |
| 724 args['buildername'], args['buildnumber'], | |
| 725 args['supplemental_columns'], log_processor.IsReferenceBuild()) | |
| 726 else: | |
| 727 print 'Error: No json output from telemetry.' | 758 print 'Error: No json output from telemetry.' |
| 728 print '@@@STEP_FAILURE@@@' | 759 print '@@@STEP_FAILURE@@@' |
| 729 log_processor.Cleanup() | 760 log_processor.Cleanup() |
| 730 else: | 761 else: |
| 731 charts = _GetDataFromLogProcessor(log_processor) | 762 charts = _GetDataFromLogProcessor(log_processor) |
| 732 results = results_dashboard.MakeListOfPoints( | 763 results = results_dashboard.MakeListOfPoints( |
| 733 charts, args['system'], args['test'], args['mastername'], | 764 charts, args['system'], args['test'], args['mastername'], |
| 734 args['buildername'], args['buildnumber'], args['supplemental_columns']) | 765 args['buildername'], args['buildnumber'], args['supplemental_columns']) |
| 735 if results: | 766 if results: |
| 736 logging.debug(json.dumps(results, indent=2)) | 767 logging.debug(json.dumps(results, indent=2)) |
| (...skipping 376 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 1113 if options.generate_json_file: | 1144 if options.generate_json_file: |
| 1114 if not _GenerateJSONForTestResults(options, log_processor): | 1145 if not _GenerateJSONForTestResults(options, log_processor): |
| 1115 return 1 | 1146 return 1 |
| 1116 | 1147 |
| 1117 if options.annotate: | 1148 if options.annotate: |
| 1118 annotation_utils.annotate( | 1149 annotation_utils.annotate( |
| 1119 options.test_type, result, log_processor, | 1150 options.test_type, result, log_processor, |
| 1120 options.factory_properties.get('full_test_name'), | 1151 options.factory_properties.get('full_test_name'), |
| 1121 perf_dashboard_id=options.perf_dashboard_id) | 1152 perf_dashboard_id=options.perf_dashboard_id) |
| 1122 | 1153 |
| 1154 if options.chartjson_file and telemetry_info: |
| 1155 _WriteChartJsonToOutput(options.chartjson_file, |
| 1156 log_processor, |
| 1157 _ResultsDashboardDict(options)) |
| 1158 |
| 1123 if options.results_url: | 1159 if options.results_url: |
| 1124 _SendResultsToDashboard(log_processor, _ResultsDashboardDict(options)) | 1160 _SendResultsToDashboard(log_processor, _ResultsDashboardDict(options)) |
| 1125 | 1161 |
| 1126 return result | 1162 return result |
| 1127 | 1163 |
| 1128 | 1164 |
| 1129 def _MainIOS(options, args, extra_env): | 1165 def _MainIOS(options, args, extra_env): |
| 1130 """Runs the test on iOS.""" | 1166 """Runs the test on iOS.""" |
| 1131 if len(args) < 1: | 1167 if len(args) < 1: |
| 1132 raise chromium_utils.MissingArgument('Usage: %s' % USAGE) | 1168 raise chromium_utils.MissingArgument('Usage: %s' % USAGE) |
| (...skipping 252 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 1385 if options.generate_json_file: | 1421 if options.generate_json_file: |
| 1386 if not _GenerateJSONForTestResults(options, log_processor): | 1422 if not _GenerateJSONForTestResults(options, log_processor): |
| 1387 return 1 | 1423 return 1 |
| 1388 | 1424 |
| 1389 if options.annotate: | 1425 if options.annotate: |
| 1390 annotation_utils.annotate( | 1426 annotation_utils.annotate( |
| 1391 options.test_type, result, log_processor, | 1427 options.test_type, result, log_processor, |
| 1392 options.factory_properties.get('full_test_name'), | 1428 options.factory_properties.get('full_test_name'), |
| 1393 perf_dashboard_id=options.perf_dashboard_id) | 1429 perf_dashboard_id=options.perf_dashboard_id) |
| 1394 | 1430 |
| 1431 if options.chartjson_file and telemetry_info: |
| 1432 _WriteChartJsonToOutput(options.chartjson_file, |
| 1433 log_processor, |
| 1434 _ResultsDashboardDict(options)) |
| 1435 |
| 1395 if options.results_url: | 1436 if options.results_url: |
| 1396 _SendResultsToDashboard(log_processor, _ResultsDashboardDict(options)) | 1437 _SendResultsToDashboard(log_processor, _ResultsDashboardDict(options)) |
| 1397 | 1438 |
| 1398 return result | 1439 return result |
| 1399 | 1440 |
| 1400 | 1441 |
| 1401 def _MainWin(options, args, extra_env): | 1442 def _MainWin(options, args, extra_env): |
| 1402 """Runs tests on windows. | 1443 """Runs tests on windows. |
| 1403 | 1444 |
| 1404 Using the target build configuration, run the executable given in the | 1445 Using the target build configuration, run the executable given in the |
| (...skipping 97 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 1502 if options.generate_json_file: | 1543 if options.generate_json_file: |
| 1503 if not _GenerateJSONForTestResults(options, log_processor): | 1544 if not _GenerateJSONForTestResults(options, log_processor): |
| 1504 return 1 | 1545 return 1 |
| 1505 | 1546 |
| 1506 if options.annotate: | 1547 if options.annotate: |
| 1507 annotation_utils.annotate( | 1548 annotation_utils.annotate( |
| 1508 options.test_type, result, log_processor, | 1549 options.test_type, result, log_processor, |
| 1509 options.factory_properties.get('full_test_name'), | 1550 options.factory_properties.get('full_test_name'), |
| 1510 perf_dashboard_id=options.perf_dashboard_id) | 1551 perf_dashboard_id=options.perf_dashboard_id) |
| 1511 | 1552 |
| 1553 if options.chartjson_file and telemetry_info: |
| 1554 _WriteChartJsonToOutput(options.chartjson_file, |
| 1555 log_processor, |
| 1556 _ResultsDashboardDict(options)) |
| 1557 |
| 1512 if options.results_url: | 1558 if options.results_url: |
| 1513 _SendResultsToDashboard(log_processor, _ResultsDashboardDict(options)) | 1559 _SendResultsToDashboard(log_processor, _ResultsDashboardDict(options)) |
| 1514 | 1560 |
| 1515 return result | 1561 return result |
| 1516 | 1562 |
| 1517 | 1563 |
| 1518 def _MainAndroid(options, args, extra_env): | 1564 def _MainAndroid(options, args, extra_env): |
| 1519 """Runs tests on android. | 1565 """Runs tests on android. |
| 1520 | 1566 |
| 1521 Running GTest-based tests on android is different than on Linux as it requires | 1567 Running GTest-based tests on android is different than on Linux as it requires |
| (...skipping 60 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 1582 Ensures that --output=chartjson is set and adds a --output argument. | 1628 Ensures that --output=chartjson is set and adds a --output argument. |
| 1583 | 1629 |
| 1584 Arguments: | 1630 Arguments: |
| 1585 args: list of command line arguments, starts with 'run_benchmark' for | 1631 args: list of command line arguments, starts with 'run_benchmark' for |
| 1586 telemetry tests. | 1632 telemetry tests. |
| 1587 | 1633 |
| 1588 Returns: | 1634 Returns: |
| 1589 None if not a telemetry test, otherwise a | 1635 None if not a telemetry test, otherwise a |
| 1590 dict containing the output filename and whether it is a reference build. | 1636 dict containing the output filename and whether it is a reference build. |
| 1591 """ | 1637 """ |
| 1592 # Temporarily revert while investigating crbug.com/423034 | |
| 1593 # pylint: disable=W0101 | |
| 1594 return {} | |
| 1595 if not args[0].endswith('run_benchmark'): | 1638 if not args[0].endswith('run_benchmark'): |
| 1596 # Not a telemetry run | 1639 # Not a telemetry run |
| 1597 return None | 1640 return None |
| 1598 | 1641 |
| 1599 is_ref = '--browser=reference' in args | 1642 is_ref = '--browser=reference' in args |
| 1600 | 1643 |
| 1601 if '--output-format=buildbot' in args: | 1644 if '--output-format=buildbot' in args: |
| 1602 args[args.index('--output-format=buildbot')] = '--output-format=chartjson' | 1645 args[args.index('--output-format=buildbot')] = '--output-format=chartjson' |
| 1603 | 1646 |
| 1604 output_dir = tempfile.mkdtemp() | 1647 output_dir = tempfile.mkdtemp() |
| (...skipping 87 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 1692 option_parser.add_option('--xvfb', action='store_true', dest='xvfb', | 1735 option_parser.add_option('--xvfb', action='store_true', dest='xvfb', |
| 1693 default=True, | 1736 default=True, |
| 1694 help='Start virtual X server on Linux.') | 1737 help='Start virtual X server on Linux.') |
| 1695 option_parser.add_option('--no-xvfb', action='store_false', dest='xvfb', | 1738 option_parser.add_option('--no-xvfb', action='store_false', dest='xvfb', |
| 1696 help='Do not start virtual X server on Linux.') | 1739 help='Do not start virtual X server on Linux.') |
| 1697 option_parser.add_option('--sharding-args', dest='sharding_args', | 1740 option_parser.add_option('--sharding-args', dest='sharding_args', |
| 1698 default='', | 1741 default='', |
| 1699 help='Options to pass to sharding_supervisor.') | 1742 help='Options to pass to sharding_supervisor.') |
| 1700 option_parser.add_option('-o', '--results-directory', default='', | 1743 option_parser.add_option('-o', '--results-directory', default='', |
| 1701 help='output results directory for JSON file.') | 1744 help='output results directory for JSON file.') |
| 1745 option_parser.add_option('--chartjson-file', default='', |
| 1746 help='File to dump chartjson results.') |
| 1702 option_parser.add_option('--builder-name', default=None, | 1747 option_parser.add_option('--builder-name', default=None, |
| 1703 help='The name of the builder running this script.') | 1748 help='The name of the builder running this script.') |
| 1704 option_parser.add_option('--slave-name', default=None, | 1749 option_parser.add_option('--slave-name', default=None, |
| 1705 help='The name of the slave running this script.') | 1750 help='The name of the slave running this script.') |
| 1706 option_parser.add_option('--master-class-name', default=None, | 1751 option_parser.add_option('--master-class-name', default=None, |
| 1707 help='The class name of the buildbot master running ' | 1752 help='The class name of the buildbot master running ' |
| 1708 'this script: examples include "Chromium", ' | 1753 'this script: examples include "Chromium", ' |
| 1709 '"ChromiumWebkit", and "ChromiumGPU". The ' | 1754 '"ChromiumWebkit", and "ChromiumGPU". The ' |
| 1710 'flakiness dashboard uses this value to ' | 1755 'flakiness dashboard uses this value to ' |
| 1711 'categorize results. See buildershandler.py ' | 1756 'categorize results. See buildershandler.py ' |
| (...skipping 305 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 2017 finally: | 2062 finally: |
| 2018 if did_launch_dbus: | 2063 if did_launch_dbus: |
| 2019 # It looks like the command line argument --exit-with-session | 2064 # It looks like the command line argument --exit-with-session |
| 2020 # isn't working to clean up the spawned dbus-daemon. Kill it | 2065 # isn't working to clean up the spawned dbus-daemon. Kill it |
| 2021 # manually. | 2066 # manually. |
| 2022 _ShutdownDBus() | 2067 _ShutdownDBus() |
| 2023 | 2068 |
| 2024 | 2069 |
# Script entry point: exit with the status code returned by main().
if __name__ == '__main__':
  sys.exit(main())
| OLD | NEW |