OLD | NEW |
1 #!/usr/bin/env python | 1 #!/usr/bin/env python |
2 # Copyright (c) 2013 The Chromium Authors. All rights reserved. | 2 # Copyright (c) 2013 The Chromium Authors. All rights reserved. |
3 # Use of this source code is governed by a BSD-style license that can be | 3 # Use of this source code is governed by a BSD-style license that can be |
4 # found in the LICENSE file. | 4 # found in the LICENSE file. |
5 | 5 |
6 """Run Performance Test Bisect Tool | 6 """Run Performance Test Bisect Tool |
7 | 7 |
8 This script is used by a try bot to run the bisect script with the parameters | 8 This script is used by a try bot to run the bisect script with the parameters |
9 specified in the bisect config file. It checks out a copy of the depot in | 9 specified in the bisect config file. It checks out a copy of the depot in |
10 a subdirectory 'bisect' of the working directory provided, and runs the | 10 a subdirectory 'bisect' of the working directory provided, and runs the |
11 bisect script there. | 11 bisect script there. |
12 """ | 12 """ |
13 | 13 |
| 14 import json |
14 import optparse | 15 import optparse |
15 import os | 16 import os |
16 import platform | 17 import platform |
17 import re | 18 import re |
| 19 import shlex |
18 import subprocess | 20 import subprocess |
19 import sys | 21 import sys |
20 import traceback | 22 import traceback |
21 | 23 |
22 from auto_bisect import bisect_perf_regression | 24 from auto_bisect import bisect_perf_regression |
23 from auto_bisect import bisect_utils | 25 from auto_bisect import bisect_utils |
24 from auto_bisect import math_utils | 26 from auto_bisect import math_utils |
25 from auto_bisect import source_control | 27 from auto_bisect import source_control |
26 | 28 |
27 CROS_BOARD_ENV = 'BISECT_CROS_BOARD' | 29 CROS_BOARD_ENV = 'BISECT_CROS_BOARD' |
28 CROS_IP_ENV = 'BISECT_CROS_IP' | 30 CROS_IP_ENV = 'BISECT_CROS_IP' |
29 | |
30 SCRIPT_DIR = os.path.abspath(os.path.dirname(__file__)) | 31 SCRIPT_DIR = os.path.abspath(os.path.dirname(__file__)) |
31 SRC_DIR = os.path.join(SCRIPT_DIR, os.path.pardir) | 32 SRC_DIR = os.path.join(SCRIPT_DIR, os.path.pardir) |
32 BISECT_CONFIG_PATH = os.path.join(SCRIPT_DIR, 'auto_bisect', 'bisect.cfg') | 33 BISECT_CONFIG_PATH = os.path.join(SCRIPT_DIR, 'auto_bisect', 'bisect.cfg') |
33 RUN_TEST_CONFIG_PATH = os.path.join(SCRIPT_DIR, 'run-perf-test.cfg') | 34 RUN_TEST_CONFIG_PATH = os.path.join(SCRIPT_DIR, 'run-perf-test.cfg') |
34 WEBKIT_RUN_TEST_CONFIG_PATH = os.path.join( | 35 WEBKIT_RUN_TEST_CONFIG_PATH = os.path.join( |
35 SRC_DIR, 'third_party', 'WebKit', 'Tools', 'run-perf-test.cfg') | 36 SRC_DIR, 'third_party', 'WebKit', 'Tools', 'run-perf-test.cfg') |
36 BISECT_SCRIPT_DIR = os.path.join(SCRIPT_DIR, 'auto_bisect') | 37 BISECT_SCRIPT_DIR = os.path.join(SCRIPT_DIR, 'auto_bisect') |
37 | 38 |
| 39 PERF_BENCHMARKS_PATH = 'tools/perf/benchmarks' |
| 40 BUILDBOT_BUILDERNAME = 'BUILDBOT_BUILDERNAME' |
| 41 BENCHMARKS_JSON_FILE = 'benchmarks.json' |
38 | 42 |
39 class Goma(object): | 43 class Goma(object): |
40 | 44 |
41 def __init__(self, path_to_goma): | 45 def __init__(self, path_to_goma): |
42 self._abs_path_to_goma = None | 46 self._abs_path_to_goma = None |
43 self._abs_path_to_goma_file = None | 47 self._abs_path_to_goma_file = None |
44 if not path_to_goma: | 48 if not path_to_goma: |
45 return | 49 return |
46 self._abs_path_to_goma = os.path.abspath(path_to_goma) | 50 self._abs_path_to_goma = os.path.abspath(path_to_goma) |
47 filename = 'goma_ctl.bat' if os.name == 'nt' else 'goma_ctl.sh' | 51 filename = 'goma_ctl.bat' if os.name == 'nt' else 'goma_ctl.sh' |
(...skipping 382 matching lines...)
430 | 434 |
431 results_without_patch = _RunCommandStepForPerformanceTest( | 435 results_without_patch = _RunCommandStepForPerformanceTest( |
432 b, opts, False, True, annotations_dict['results_label2'], | 436 b, opts, False, True, annotations_dict['results_label2'], |
433 annotations_dict['run2']) | 437 annotations_dict['run2']) |
434 | 438 |
435 # Find the link to the cloud stored results file. | 439 # Find the link to the cloud stored results file. |
436 _ParseAndOutputCloudLinks( | 440 _ParseAndOutputCloudLinks( |
437 results_without_patch, results_with_patch, annotations_dict) | 441 results_without_patch, results_with_patch, annotations_dict) |
438 | 442 |
439 | 443 |
440 def _SetupAndRunPerformanceTest(config, path_to_goma): | 444 def _SetupAndRunPerformanceTest(config, path_to_goma, is_cq_tryjob=False): |
441 """Attempts to build and run the current revision with and without the | 445 """Attempts to build and run the current revision with and without the |
442 current patch, with the parameters passed in. | 446 current patch, with the parameters passed in. |
443 | 447 |
444 Args: | 448 Args: |
445 config: The config read from run-perf-test.cfg. | 449 config: The config read from run-perf-test.cfg. |
446 path_to_goma: Path to goma directory. | 450 path_to_goma: Path to goma directory. |
| 451 is_cq_tryjob: Whether or not the try job was initiated by the commit queue. |
447 | 452 |
448 Returns: | 453 Returns: |
449 An exit code: 0 on success, otherwise 1. | 454 An exit code: 0 on success, otherwise 1. |
450 """ | 455 """ |
451 if platform.release() == 'XP': | 456 if platform.release() == 'XP': |
452 print 'Windows XP is not supported for perf try jobs because it lacks ' | 457 print 'Windows XP is not supported for perf try jobs because it lacks ' |
453 print 'goma support. Please refer to crbug.com/330900.' | 458 print 'goma support. Please refer to crbug.com/330900.' |
454 return 1 | 459 return 1 |
455 try: | 460 try: |
456 with Goma(path_to_goma) as _: | 461 with Goma(path_to_goma) as _: |
457 config['use_goma'] = bool(path_to_goma) | 462 config['use_goma'] = bool(path_to_goma) |
458 if config['use_goma']: | 463 if config['use_goma']: |
459 config['goma_dir'] = os.path.abspath(path_to_goma) | 464 config['goma_dir'] = os.path.abspath(path_to_goma) |
460 _RunPerformanceTest(config) | 465 if not is_cq_tryjob: |
| 466 _RunPerformanceTest(config) |
| 467 else: |
| 468 return _RunBenchmarksForCommitQueue(config) |
461 return 0 | 469 return 0 |
462 except RuntimeError, e: | 470 except RuntimeError, e: |
463 bisect_utils.OutputAnnotationStepFailure() | 471 bisect_utils.OutputAnnotationStepFailure() |
464 bisect_utils.OutputAnnotationStepClosed() | 472 bisect_utils.OutputAnnotationStepClosed() |
465 _OutputFailedResults('Error: %s' % e.message) | 473 _OutputFailedResults('Error: %s' % e.message) |
466 return 1 | 474 return 1 |
467 | 475 |
468 | 476 |
469 def _RunBisectionScript( | 477 def _RunBisectionScript( |
470 config, working_directory, path_to_goma, path_to_extra_src, dry_run): | 478 config, working_directory, path_to_goma, path_to_extra_src, dry_run): |
(...skipping 88 matching lines...)
559 def _PrintConfigStep(config): | 567 def _PrintConfigStep(config): |
560 """Prints out the given config, along with Buildbot annotations.""" | 568 """Prints out the given config, along with Buildbot annotations.""" |
561 bisect_utils.OutputAnnotationStepStart('Config') | 569 bisect_utils.OutputAnnotationStepStart('Config') |
562 print | 570 print |
563 for k, v in config.iteritems(): | 571 for k, v in config.iteritems(): |
564 print ' %s : %s' % (k, v) | 572 print ' %s : %s' % (k, v) |
565 print | 573 print |
566 bisect_utils.OutputAnnotationStepClosed() | 574 bisect_utils.OutputAnnotationStepClosed() |
567 | 575 |
568 | 576 |
| 577 def _GetBrowserType(bot_platform): |
| 578 """Gets the browser type to be used in the run benchmark command.""" |
| 579 if bot_platform == 'android': |
| 580 return 'android-chrome-shell' |
| 581 elif 'x64' in bot_platform: |
| 582 return 'release_x64' |
| 583 |
| 584 return 'release' |
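
A quick sketch of the mapping _GetBrowserType implements (the bot names here are illustrative, not an exhaustive list of builders):

    _GetBrowserType('android')   # -> 'android-chrome-shell'
    _GetBrowserType('mac_x64')   # -> 'release_x64' (any name containing 'x64')
    _GetBrowserType('linux')     # -> 'release' (default)
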
| 585 |
| 586 |
| 588 def _GuessTelemetryTestCommand(bot_platform, test_name=None): |
| 589 """Creates a Telemetry benchmark command based on bot and test name.""" |
| 590 command = [] |
| 591 # On Windows, Python scripts should be prefixed with the python command. |
| 592 if bot_platform == 'win': |
| 593 command.append('python') |
| 594 command.append('tools/perf/run_benchmark') |
| 595 command.append('-v') |
| 596 command.append('--browser=%s' % _GetBrowserType(bot_platform)) |
| 597 if test_name: |
| 598 command.append(test_name) |
| 599 |
| 600 return ' '.join(command) |
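
For example, with hypothetical bot and benchmark names, the command assembled above comes out as (doctest-style sketch):

    >>> _GuessTelemetryTestCommand('win', 'smoothness.top_25')
    'python tools/perf/run_benchmark -v --browser=release smoothness.top_25'
    >>> _GuessTelemetryTestCommand('android', 'page_cycler.typical_25')
    'tools/perf/run_benchmark -v --browser=android-chrome-shell page_cycler.typical_25'

Note that only a bot_platform of exactly 'win' gets the python prefix.
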
| 601 |
| 602 |
| 603 def _GetConfigBasedOnPlatform(config, bot_name, test_name): |
| 604 """Generates required options to create BisectPerformanceMetrics instance.""" |
| 605 opts_dict = { |
| 606 'command': _GuessTelemetryTestCommand(bot_name, test_name), |
| 607 'target_arch': 'x64' if 'x64' in bot_name else 'ia32', |
| 608 'build_preference': 'ninja', |
| 609 'output_buildbot_annotations': True, |
| 610 'repeat_test_count': 1, |
| 611 'bisect_mode': bisect_utils.BISECT_MODE_RETURN_CODE, |
| 612 } |
| 613 |
| 614 if 'use_goma' in config: |
| 615 opts_dict['use_goma'] = config['use_goma'] |
| 616 if 'goma_dir' in config: |
| 617 opts_dict['goma_dir'] = config['goma_dir'] |
| 618 if 'android-chrome-shell' in opts_dict['command']: |
| 619 opts_dict['target_platform'] = 'android' |
| 620 |
| 621 return bisect_perf_regression.BisectOptions.FromDict(opts_dict) |
| 622 |
| 623 |
| 624 def _GetModifiedFilesFromPatch(cwd=None): |
| 625 """Gets list of files modified in the current patch.""" |
| 626 log_output = bisect_utils.CheckRunGit( |
| 627 ['diff', '--no-ext-diff', '--name-only', 'HEAD~1'], cwd=cwd) |
| 628 modified_files = log_output.split() |
| 629 return modified_files |
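
This is equivalent to running `git diff --no-ext-diff --name-only HEAD~1` at the depot root. A standalone sketch of the parsing, with hypothetical output:

    # Hypothetical `git diff --name-only` output for a two-file patch:
    log_output = ('tools/perf/benchmarks/blink_perf.py\n'
                  'tools/perf/page_sets/top_25.py\n')
    print log_output.split()
    # ['tools/perf/benchmarks/blink_perf.py', 'tools/perf/page_sets/top_25.py']

(str.split() would mis-handle paths containing whitespace; Chromium paths avoid those, so it is safe here.)
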
| 630 |
| 631 |
| 632 def _GetAffectedBenchmarkModuleNames(): |
| 633 """Gets list of modified benchmark files under tools/perf/benchmarks.""" |
| 634 all_affected_files = _GetModifiedFilesFromPatch() |
| 635 modified_benchmarks = [] |
| 636 for affected_file in all_affected_files: |
| 637 if affected_file.startswith(PERF_BENCHMARKS_PATH): |
| 638 benchmark = os.path.basename(os.path.splitext(affected_file)[0]) |
| 639 modified_benchmarks.append(benchmark) |
| 640 return modified_benchmarks |
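
A standalone sketch of the path-to-module-name derivation, using hypothetical file paths:

    import os

    PERF_BENCHMARKS_PATH = 'tools/perf/benchmarks'
    affected = ['tools/perf/benchmarks/blink_perf.py',  # -> 'blink_perf'
                'tools/perf/page_sets/top_25.py']       # skipped: not a benchmark
    print [os.path.basename(os.path.splitext(f)[0])
           for f in affected if f.startswith(PERF_BENCHMARKS_PATH)]
    # ['blink_perf']
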
| 641 |
| 642 |
| 643 def _ListAvailableBenchmarks(bot_platform): |
| 644 """Gets all available benchmarks names as a list.""" |
| 645 browser_type = _GetBrowserType(bot_platform) |
| 646 if os.path.exists(BENCHMARKS_JSON_FILE): |
| 647 os.remove(BENCHMARKS_JSON_FILE) |
| 648 command = [] |
| 649 if 'win' in bot_platform: |
| 650 command.append('python') |
| 651 command.append('tools/perf/run_benchmark') |
| 652 command.extend([ |
| 653 'list', |
| 654 '--browser', |
| 655 browser_type, |
| 656 '--json-output', |
| 657 BENCHMARKS_JSON_FILE]) |
| 658 try: |
| 659 output, return_code = bisect_utils.RunProcessAndRetrieveOutput( |
| 660 command=command, cwd=SRC_DIR) |
| 661 if return_code: |
| 662 raise RuntimeError('Something went wrong while listing benchmarks. ' |
| 663 'Please review the command line: %s.\nERROR: [%s]' % |
| 664 (' '.join(command), output)) |
| 665 with open(BENCHMARKS_JSON_FILE) as tests_json: |
| 666 tests_data = json.load(tests_json) |
| 667 if tests_data.get('steps'): |
| 668 return tests_data.get('steps').keys() |
| 669 finally: |
| 670 try: |
| 671 os.remove(BENCHMARKS_JSON_FILE) |
| 672 except OSError: |
| 673 # The listing may have failed before the file was written. |
| 674 pass |
| 676 return None |
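
The parsing above assumes the --json-output file keeps benchmark names as keys under a top-level 'steps' entry, roughly like this (shape assumed for illustration, not verified against a specific Telemetry version):

    tests_data = {
        'steps': {
            'blink_perf.css': {'cmd': '...'},
            'sunspider': {'cmd': '...'},
        },
    }
    print tests_data.get('steps').keys()
    # ['blink_perf.css', 'sunspider'] (dict ordering not guaranteed)
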
| 677 |
| 678 |
| 679 def _OutputOverallResults(results): |
| 680 """Creates results step and prints results on buildbot job.""" |
| 681 test_status = all(results.values()) |
| 682 bisect_utils.OutputAnnotationStepStart( |
| 683 'Results - %s' % ('Passed' if test_status else 'Failed')) |
| 684 print |
| 685 print 'Results of benchmarks:' |
| 686 print |
| 687 for benchmark, result in results.iteritems(): |
| 688 print '%s: %s' % (benchmark, 'Passed' if result else 'Failed') |
| 689 if not test_status: |
| 690 bisect_utils.OutputAnnotationStepFailure() |
| 691 bisect_utils.OutputAnnotationStepClosed() |
| 692 # Returns 0 for success and 1 for failure. |
| 693 return 0 if test_status else 1 |
| 694 |
| 695 |
| 696 def _RunBenchmark(bisect_instance, opts, bot_name, benchmark_name): |
| 697 """Runs a Telemetry benchmark.""" |
| 698 bisect_utils.OutputAnnotationStepStart(benchmark_name) |
| 699 command_to_run = _GuessTelemetryTestCommand(bot_name, benchmark_name) |
| 700 args = shlex.split(command_to_run, posix=not bisect_utils.IsWindowsHost()) |
| 701 output, return_code = bisect_utils.RunProcessAndRetrieveOutput(args, SRC_DIR) |
| 702 # A value other than 0 indicates that the test couldn't be run, and results |
| 703 # should also include an error message. |
| 704 if return_code: |
| 705 print ('Error: Something went wrong running the benchmark: %s. ' |
| 706 'Please review the command line: %s\n\n%s' % |
| 707 (benchmark_name, command_to_run, output)) |
| 708 bisect_utils.OutputAnnotationStepFailure() |
| 709 print output |
| 710 bisect_utils.OutputAnnotationStepClosed() |
| 711 # The return code from the subprocess that executed the test command is 0 |
| 712 # on a successful run and non-zero otherwise. |
| 713 return return_code == 0 |
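
The posix flag passed to shlex.split above matters on Windows hosts, where backslash path separators must not be treated as shell escapes. A minimal illustration:

    import shlex

    cmd = 'python tools\\perf\\run_benchmark -v'
    print shlex.split(cmd, posix=True)
    # ['python', 'toolsperfrun_benchmark', '-v']  (backslashes eaten as escapes)
    print shlex.split(cmd, posix=False)
    # ['python', 'tools\\perf\\run_benchmark', '-v']
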
| 714 |
| 715 |
| 716 def _RunBenchmarksForCommitQueue(config): |
| 717 """Runs Telemetry benchmark for the commit queue.""" |
| 718 os.chdir(SRC_DIR) |
| 719 # Determine the bot platform by reading the buildbot builder name from the |
| 720 # environment. |
| 721 bot_name = os.environ.get(BUILDBOT_BUILDERNAME) |
| 722 if not bot_name: |
| 723 bot_name = sys.platform |
| 724 bot_name = bot_name.split('_')[0] |
| 725 |
| 726 affected_benchmarks = _GetAffectedBenchmarkModuleNames() |
| 727 # Abort if the patch does not modify any existing benchmark files. |
| 728 if not affected_benchmarks: |
| 729 bisect_utils.OutputAnnotationStepStart('Results') |
| 730 print |
| 731 print ('There are no modifications to Telemetry benchmarks;' |
| 732 ' aborting the try job.') |
| 733 bisect_utils.OutputAnnotationStepClosed() |
| 734 return 0 |
| 735 |
| 736 # The bisect script expects to be run from the src directory. |
| 737 # Gets the options required in order to create a BisectPerformanceMetrics |
| 738 # instance. Since command is a required option, we just create a dummy |
| 739 # command for now. |
| 740 opts = _GetConfigBasedOnPlatform(config, bot_name, test_name='') |
| 741 annotations_dict = _GetStepAnnotationStringsDict(config) |
| 742 b = bisect_perf_regression.BisectPerformanceMetrics(opts, os.getcwd()) |
| 743 _RunBuildStepForPerformanceTest(b, |
| 744 annotations_dict.get('build1'), |
| 745 annotations_dict.get('sync1'), |
| 746 None) |
| 747 available_benchmarks = _ListAvailableBenchmarks(bot_name) or [] |
| 748 overall_results = {} |
| 749 for affected_benchmark in affected_benchmarks: |
| 750 for benchmark in available_benchmarks: |
| 751 if (benchmark.startswith(affected_benchmark) and |
| 752 not benchmark.endswith('reference')): |
| 753 overall_results[benchmark] = _RunBenchmark(b, opts, bot_name, benchmark) |
| 754 |
| 755 return _OutputOverallResults(overall_results) |
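
The prefix/suffix filter above fans each modified benchmark module out to every benchmark it defines, while skipping reference runs. A sketch with hypothetical names:

    affected_benchmarks = ['blink_perf']
    available_benchmarks = ['blink_perf.css',            # runs: prefix match
                            'blink_perf.css.reference',  # skipped: reference run
                            'sunspider']                 # skipped: other module
    print [b for a in affected_benchmarks for b in available_benchmarks
           if b.startswith(a) and not b.endswith('reference')]
    # ['blink_perf.css']
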
| 756 |
| 757 |
569 def _OptionParser(): | 758 def _OptionParser(): |
570 """Returns the options parser for run-bisect-perf-regression.py.""" | 759 """Returns the options parser for run-bisect-perf-regression.py.""" |
| 760 |
| 761 def ConvertJson(option, _, value, parser): |
| 762 """Provides an OptionParser callback to unmarshal a JSON string.""" |
| 763 setattr(parser.values, option.dest, json.loads(value)) |
| 764 |
571 usage = ('%prog [options] [-- chromium-options]\n' | 765 usage = ('%prog [options] [-- chromium-options]\n' |
572 'Used by a try bot to run the bisection script using the parameters' | 766 'Used by a try bot to run the bisection script using the parameters' |
573 ' provided in the auto_bisect/bisect.cfg file.') | 767 ' provided in the auto_bisect/bisect.cfg file.') |
574 parser = optparse.OptionParser(usage=usage) | 768 parser = optparse.OptionParser(usage=usage) |
575 parser.add_option('-w', '--working_directory', | 769 parser.add_option('-w', '--working_directory', |
576 type='str', | 770 type='str', |
577 help='A working directory to supply to the bisection ' | 771 help='A working directory to supply to the bisection ' |
578 'script, which will use it as the location to checkout ' | 772 'script, which will use it as the location to checkout ' |
579 'a copy of the chromium depot.') | 773 'a copy of the chromium depot.') |
580 parser.add_option('-p', '--path_to_goma', | 774 parser.add_option('-p', '--path_to_goma', |
581 type='str', | 775 type='str', |
582 help='Path to goma directory. If this is supplied, goma ' | 776 help='Path to goma directory. If this is supplied, goma ' |
583 'builds will be enabled.') | 777 'builds will be enabled.') |
584 parser.add_option('--path_to_config', | 778 parser.add_option('--path_to_config', |
585 type='str', | 779 type='str', |
586 help='Path to the config file to use. If this is supplied, ' | 780 help='Path to the config file to use. If this is supplied, ' |
587 'the bisect script will use this to override the default ' | 781 'the bisect script will use this to override the default ' |
588 'config file path. The script will attempt to load it ' | 782 'config file path. The script will attempt to load it ' |
589 'as a bisect config first, then a perf config.') | 783 'as a bisect config first, then a perf config.') |
590 parser.add_option('--extra_src', | 784 parser.add_option('--extra_src', |
591 type='str', | 785 type='str', |
592 help='Path to extra source file. If this is supplied, ' | 786 help='Path to extra source file. If this is supplied, ' |
593 'bisect script will use this to override default behavior.') | 787 'bisect script will use this to override default behavior.') |
594 parser.add_option('--dry_run', | 788 parser.add_option('--dry_run', |
595 action="store_true", | 789 action="store_true", |
596 help='The script will perform the full bisect, but ' | 790 help='The script will perform the full bisect, but ' |
597 'without syncing, building, or running the performance ' | 791 'without syncing, building, or running the performance ' |
598 'tests.') | 792 'tests.') |
| 793 # This argument is passed by buildbot to supply build properties to the bisect |
| 794 # script. Note: don't change the "--build-properties" option name. |
| 795 parser.add_option('--build-properties', action='callback', |
| 796 dest='build_properties', |
| 797 callback=ConvertJson, type='string', |
| 798 nargs=1, default={}, |
| 799 help='build properties in JSON format') |
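
For example, buildbot might pass the properties along these lines (values and quoting are illustrative; the callback simply json.loads the string):

    # Illustrative invocation:
    #   run-bisect-perf-regression.py \
    #       --build-properties='{"requester": "commit-bot@chromium.org"}'
    import json
    props = json.loads('{"requester": "commit-bot@chromium.org"}')
    print props.get('requester')  # commit-bot@chromium.org
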
| 800 |
599 return parser | 801 return parser |
600 | 802 |
601 | 803 |
602 def main(): | 804 def main(): |
603 """Entry point for run-bisect-perf-regression.py. | 805 """Entry point for run-bisect-perf-regression.py. |
604 | 806 |
605 Reads the config file, and then tries to either bisect a regression or | 807 Reads the config file, and then tries to either bisect a regression or |
606 just run a performance test, depending on the particular config parameters | 808 just run a performance test, depending on the particular config parameters |
607 specified in the config file. | 809 specified in the config file. |
608 """ | 810 """ |
(...skipping 30 matching lines...)
639 path_to_perf_cfg = os.path.join( | 841 path_to_perf_cfg = os.path.join( |
640 os.path.abspath(os.path.dirname(sys.argv[0])), | 842 os.path.abspath(os.path.dirname(sys.argv[0])), |
641 current_perf_cfg_file) | 843 current_perf_cfg_file) |
642 | 844 |
643 config = _LoadConfigFile(path_to_perf_cfg) | 845 config = _LoadConfigFile(path_to_perf_cfg) |
644 config_is_valid = _ValidatePerfConfigFile(config) | 846 config_is_valid = _ValidatePerfConfigFile(config) |
645 | 847 |
646 if config and config_is_valid: | 848 if config and config_is_valid: |
647 return _SetupAndRunPerformanceTest(config, opts.path_to_goma) | 849 return _SetupAndRunPerformanceTest(config, opts.path_to_goma) |
648 | 850 |
| 851 # If no valid config was loaded, check whether the request came from the |
| 852 # commit queue (commit-bot); if so, run the modified Telemetry benchmarks |
| 853 # for the patch. |
| 854 if opts.build_properties.get('requester') == 'commit-bot@chromium.org': |
| 855 return _SetupAndRunPerformanceTest( |
| 856 config={}, path_to_goma=opts.path_to_goma, is_cq_tryjob=True) |
| 857 |
649 print ('Error: Could not load config file. Double check your changes to ' | 858 print ('Error: Could not load config file. Double check your changes to ' |
650 'auto_bisect/bisect.cfg or run-perf-test.cfg for syntax errors.\n') | 859 'auto_bisect/bisect.cfg or run-perf-test.cfg for syntax errors.\n') |
651 return 1 | 860 return 1 |
652 | 861 |
653 | 862 |
654 if __name__ == '__main__': | 863 if __name__ == '__main__': |
655 sys.exit(main()) | 864 sys.exit(main()) |