Chromium Code Reviews

Side by Side Diff: tools/run-bisect-perf-regression.py

Issue 1045553003: Add support for conditionally running telemetry benchmarks on CQ. (Closed) Base URL: https://chromium.googlesource.com/chromium/src.git@master
Patch Set: Created 5 years, 8 months ago
OLD | NEW
1 #!/usr/bin/env python 1 #!/usr/bin/env python
2 # Copyright (c) 2013 The Chromium Authors. All rights reserved. 2 # Copyright (c) 2013 The Chromium Authors. All rights reserved.
3 # Use of this source code is governed by a BSD-style license that can be 3 # Use of this source code is governed by a BSD-style license that can be
4 # found in the LICENSE file. 4 # found in the LICENSE file.
5 5
6 """Run Performance Test Bisect Tool 6 """Run Performance Test Bisect Tool
7 7
8 This script is used by a try bot to run the bisect script with the parameters 8 This script is used by a try bot to run the bisect script with the parameters
9 specified in the bisect config file. It checks out a copy of the depot in 9 specified in the bisect config file. It checks out a copy of the depot in
10 a subdirectory 'bisect' of the working directory provided, annd runs the 10 a subdirectory 'bisect' of the working directory provided, annd runs the
11 bisect scrip there. 11 bisect scrip there.
qyearsley 2015/03/31 22:21:25 Not related to this CL, but could do a drive-by fix?
prasadv 2015/04/02 21:30:12 Done.
12 """ 12 """
13 13
14 import json
14 import optparse 15 import optparse
15 import os 16 import os
16 import platform 17 import platform
17 import re 18 import re
19 import shlex
18 import subprocess 20 import subprocess
19 import sys 21 import sys
20 import traceback 22 import traceback
21 23
22 from auto_bisect import bisect_perf_regression 24 from auto_bisect import bisect_perf_regression
23 from auto_bisect import bisect_utils 25 from auto_bisect import bisect_utils
24 from auto_bisect import math_utils 26 from auto_bisect import math_utils
25 from auto_bisect import source_control 27 from auto_bisect import source_control
26 28
27 CROS_BOARD_ENV = 'BISECT_CROS_BOARD' 29 CROS_BOARD_ENV = 'BISECT_CROS_BOARD'
28 CROS_IP_ENV = 'BISECT_CROS_IP' 30 CROS_IP_ENV = 'BISECT_CROS_IP'
29
30 SCRIPT_DIR = os.path.abspath(os.path.dirname(__file__)) 31 SCRIPT_DIR = os.path.abspath(os.path.dirname(__file__))
31 SRC_DIR = os.path.join(SCRIPT_DIR, os.path.pardir) 32 SRC_DIR = os.path.join(SCRIPT_DIR, os.path.pardir)
32 BISECT_CONFIG_PATH = os.path.join(SCRIPT_DIR, 'auto_bisect', 'bisect.cfg') 33 BISECT_CONFIG_PATH = os.path.join(SCRIPT_DIR, 'auto_bisect', 'bisect.cfg')
33 RUN_TEST_CONFIG_PATH = os.path.join(SCRIPT_DIR, 'run-perf-test.cfg') 34 RUN_TEST_CONFIG_PATH = os.path.join(SCRIPT_DIR, 'run-perf-test.cfg')
34 WEBKIT_RUN_TEST_CONFIG_PATH = os.path.join( 35 WEBKIT_RUN_TEST_CONFIG_PATH = os.path.join(
35 SRC_DIR, 'third_party', 'WebKit', 'Tools', 'run-perf-test.cfg') 36 SRC_DIR, 'third_party', 'WebKit', 'Tools', 'run-perf-test.cfg')
36 BISECT_SCRIPT_DIR = os.path.join(SCRIPT_DIR, 'auto_bisect') 37 BISECT_SCRIPT_DIR = os.path.join(SCRIPT_DIR, 'auto_bisect')
37 38
39 PERF_BENCHMARKS_PATH = 'tools/perf/benchmarks'
40 BUILDBOT_BUILDERNAME = 'BUILDBOT_BUILDERNAME'
41 BENCHMARKS_JSON_FILE = 'benchmarks.json'
qyearsley 2015/03/31 22:21:25 Optionally, you could add comments about these constants.
prasadv 2015/04/02 21:30:12 Done.
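As a sketch of the comments the reviewer asked for (derived from how each constant is used later in this file; the wording is illustrative, not the author's actual follow-up):

    # Relative path (from src/) to the directory that holds Telemetry benchmarks.
    PERF_BENCHMARKS_PATH = 'tools/perf/benchmarks'
    # Name of the environment variable set by buildbot with the builder's name.
    BUILDBOT_BUILDERNAME = 'BUILDBOT_BUILDERNAME'
    # Temporary file used to capture the output of 'run_benchmark list'.
    BENCHMARKS_JSON_FILE = 'benchmarks.json'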
38 42
39 class Goma(object): 43 class Goma(object):
40 44
41 def __init__(self, path_to_goma): 45 def __init__(self, path_to_goma):
42 self._abs_path_to_goma = None 46 self._abs_path_to_goma = None
43 self._abs_path_to_goma_file = None 47 self._abs_path_to_goma_file = None
44 if not path_to_goma: 48 if not path_to_goma:
45 return 49 return
46 self._abs_path_to_goma = os.path.abspath(path_to_goma) 50 self._abs_path_to_goma = os.path.abspath(path_to_goma)
47 filename = 'goma_ctl.bat' if os.name == 'nt' else 'goma_ctl.sh' 51 filename = 'goma_ctl.bat' if os.name == 'nt' else 'goma_ctl.sh'
(...skipping 382 matching lines...)
430 434
431 results_without_patch = _RunCommandStepForPerformanceTest( 435 results_without_patch = _RunCommandStepForPerformanceTest(
432 b, opts, False, True, annotations_dict['results_label2'], 436 b, opts, False, True, annotations_dict['results_label2'],
433 annotations_dict['run2']) 437 annotations_dict['run2'])
434 438
435 # Find the link to the cloud stored results file. 439 # Find the link to the cloud stored results file.
436 _ParseAndOutputCloudLinks( 440 _ParseAndOutputCloudLinks(
437 results_without_patch, results_with_patch, annotations_dict) 441 results_without_patch, results_with_patch, annotations_dict)
438 442
439 443
440 def _SetupAndRunPerformanceTest(config, path_to_goma): 444 def _SetupAndRunPerformanceTest(config, path_to_goma, cq_tryjob=False):
441 """Attempts to build and run the current revision with and without the 445 """Attempts to build and run the current revision with and without the
442 current patch, with the parameters passed in. 446 current patch, with the parameters passed in.
443 447
444 Args: 448 Args:
445 config: The config read from run-perf-test.cfg. 449 config: The config read from run-perf-test.cfg.
446 path_to_goma: Path to goma directory. 450 path_to_goma: Path to goma directory.
451 cq_tryjob: Determines if the try job is initiated by the commit queue.
qyearsley 2015/03/31 21:29:24 This could be named is_cq_tryjob, since it's a boolean.
prasadv 2015/04/02 21:30:12 Done.
447 452
448 Returns: 453 Returns:
449 An exit code: 0 on success, otherwise 1. 454 An exit code: 0 on success, otherwise 1.
450 """ 455 """
451 if platform.release() == 'XP': 456 if platform.release() == 'XP':
452 print 'Windows XP is not supported for perf try jobs because it lacks ' 457 print 'Windows XP is not supported for perf try jobs because it lacks '
453 print 'goma support. Please refer to crbug.com/330900.' 458 print 'goma support. Please refer to crbug.com/330900.'
454 return 1 459 return 1
455 try: 460 try:
456 with Goma(path_to_goma) as _: 461 with Goma(path_to_goma) as _:
457 config['use_goma'] = bool(path_to_goma) 462 config['use_goma'] = bool(path_to_goma)
458 if config['use_goma']: 463 if config['use_goma']:
459 config['goma_dir'] = os.path.abspath(path_to_goma) 464 config['goma_dir'] = os.path.abspath(path_to_goma)
460 _RunPerformanceTest(config) 465 if not cq_tryjob:
466 _RunPerformanceTest(config)
467 else:
468 return _RunBenchmarksForCommitQueue(config)
461 return 0 469 return 0
462 except RuntimeError, e: 470 except RuntimeError, e:
463 bisect_utils.OutputAnnotationStepFailure() 471 bisect_utils.OutputAnnotationStepFailure()
464 bisect_utils.OutputAnnotationStepClosed() 472 bisect_utils.OutputAnnotationStepClosed()
465 _OutputFailedResults('Error: %s' % e.message) 473 _OutputFailedResults('Error: %s' % e.message)
466 return 1 474 return 1
467 475
468 476
469 def _RunBisectionScript( 477 def _RunBisectionScript(
470 config, working_directory, path_to_goma, path_to_extra_src, dry_run): 478 config, working_directory, path_to_goma, path_to_extra_src, dry_run):
(...skipping 88 matching lines...)
559 def _PrintConfigStep(config): 567 def _PrintConfigStep(config):
560 """Prints out the given config, along with Buildbot annotations.""" 568 """Prints out the given config, along with Buildbot annotations."""
561 bisect_utils.OutputAnnotationStepStart('Config') 569 bisect_utils.OutputAnnotationStepStart('Config')
562 print 570 print
563 for k, v in config.iteritems(): 571 for k, v in config.iteritems():
564 print ' %s : %s' % (k, v) 572 print ' %s : %s' % (k, v)
565 print 573 print
566 bisect_utils.OutputAnnotationStepClosed() 574 bisect_utils.OutputAnnotationStepClosed()
567 575
568 576
577 def _GetBrowserType(bot_platform):
578 """Gets the browser type to be used in the run benchmark command."""
579 # For Telemetry tests, we need to specify the browser,
580 # and the browser to use may depend on the platform.
qyearsley 2015/03/31 21:29:23 This comment seems to me like it could be omitted.
prasadv 2015/04/02 21:30:12 Done.
581 if bot_platform == 'android':
582 browser = 'android-chrome-shell'
583 elif 'x64' in bot_platform:
584 browser = 'release_x64'
585 else:
586 browser = 'release'
587 return browser
qyearsley 2015/03/31 21:29:25 Slightly shorter alternative: def _GetBrowserType
prasadv 2015/04/02 21:30:12 Done.
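The reviewer's alternative above is truncated in this view; one plausible shorter form (a sketch, not necessarily the exact suggestion) returns early instead of assigning to a local variable:

    def _GetBrowserType(bot_platform):
      """Gets the browser type to be used in the run benchmark command."""
      if bot_platform == 'android':
        return 'android-chrome-shell'
      if 'x64' in bot_platform:
        return 'release_x64'
      return 'release'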
588
589
590 def _GuessTestCommand(bot_platform, test_name=None):
qyearsley 2015/03/31 21:29:25 1. Possible alternate name: _GuessTelemetryTestCommand
prasadv 2015/04/02 21:30:12 Done
qyearsley 2015/04/02 21:51:05 Although, it looks like everywhere in this file wh
591 """Creates a telemetry benchmark command based on bot and test name."""
qyearsley 2015/03/31 21:29:25 For consistency, I would capitalize Telemetry everywhere.
prasadv 2015/04/02 21:30:11 Done.
592 command = []
593 # On Windows, Python scripts should be prefixed with the python command.
594 if bot_platform == 'win':
595 command.append('python')
qyearsley 2015/03/31 21:29:23 I wonder whether we could add "python" for all platforms.
prasadv 2015/04/02 21:30:12 I think this should be possible, but I just followed the existing pattern.
596 command.append('tools/perf/run_benchmark')
597 command.append('-v')
598 command.append('--browser=%s' % _GetBrowserType(bot_platform))
599 if test_name:
600 command.append(test_name)
601
602 return ' '.join(command)
603
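For illustration, the command strings the function above produces (the benchmark name is hypothetical):

    # _GuessTestCommand('android', 'sunspider')
    #   -> 'tools/perf/run_benchmark -v --browser=android-chrome-shell sunspider'
    # _GuessTestCommand('win', 'sunspider')
    #   -> 'python tools/perf/run_benchmark -v --browser=release sunspider'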
604
605 def _GetConfigBasedOnPlatform(config, bot_name, test_name):
606 """Generates required options to create BisectPerformanceMetrics instance."""
607 opts_dict = {}
608 opts_dict['command'] = _GuessTestCommand(bot_name, test_name)
609 if config.has_key('use_goma'):
qyearsley 2015/03/31 21:29:24 Equivalent: if 'use_goma' in config: ... (has_key is deprecated.)
prasadv 2015/04/02 21:30:12 Done.
610 opts_dict['use_goma'] = config['use_goma']
611 if config.has_key('goma_dir'):
612 opts_dict['goma_dir'] = config['goma_dir']
613
614 opts_dict['target_arch'] = 'x64' if 'x64' in bot_name else 'ia32'
615
616 opts_dict['build_preference'] = 'ninja'
617 opts_dict['output_buildbot_annotations'] = True
618
619 if 'android-chrome-shell' in opts_dict['command']:
620 opts_dict['target_platform'] = 'android'
621
622 opts_dict['repeat_test_count'] = 1
623 opts_dict['bisect_mode'] = bisect_utils.BISECT_MODE_RETURN_CODE
qyearsley 2015/03/31 21:29:24 Alternate structure for this function: First put r
prasadv 2015/04/02 21:30:12 Done.
624
625 return bisect_perf_regression.BisectOptions.FromDict(opts_dict)
626
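The restructuring suggested in the thread above is truncated in this view; one plausible reading (a sketch that keeps the resulting options identical) puts the unconditional options in a single dict literal, then applies the conditional ones:

    def _GetConfigBasedOnPlatform(config, bot_name, test_name):
      """Generates required options to create BisectPerformanceMetrics instance."""
      opts_dict = {
          'command': _GuessTestCommand(bot_name, test_name),
          'target_arch': 'x64' if 'x64' in bot_name else 'ia32',
          'build_preference': 'ninja',
          'output_buildbot_annotations': True,
          'repeat_test_count': 1,
          'bisect_mode': bisect_utils.BISECT_MODE_RETURN_CODE,
      }
      if 'use_goma' in config:
        opts_dict['use_goma'] = config['use_goma']
      if 'goma_dir' in config:
        opts_dict['goma_dir'] = config['goma_dir']
      if 'android-chrome-shell' in opts_dict['command']:
        opts_dict['target_platform'] = 'android'
      return bisect_perf_regression.BisectOptions.FromDict(opts_dict)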
627
628 def _GetModifiedFilesFromPatch(cwd=None):
629 """Gets list of files modified in the current patch."""
630 log_output = bisect_utils.CheckRunGit(
631 ['diff', '--no-ext-diff', '--name-only', 'HEAD~1'], cwd=cwd)
632 modified_files = log_output.split()
633 return modified_files
634
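For reference, git diff --no-ext-diff --name-only HEAD~1 prints one repository-relative path per line, so the helper returns a list such as (hypothetical patch contents):

    # _GetModifiedFilesFromPatch()
    #   -> ['tools/perf/benchmarks/smoothness.py',
    #       'tools/run-bisect-perf-regression.py']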
635
636 def _GetAllAffectedBenchmarks():
637 """Gets list of modified benchmark files under tools/perf/benchmarks."""
qyearsley 2015/03/31 21:29:24 Technically, I wouldn't say that this necessarily gets benchmark files; it returns benchmark names.
prasadv 2015/04/02 21:30:12 Done.
638 all_affected_files = _GetModifiedFilesFromPatch()
639 modified_benchmarks = []
640 for affected_file in all_affected_files:
641 if affected_file.startswith(PERF_BENCHMARKS_PATH):
642 benchamrk = os.path.basename(os.path.splitext(affected_file)[0])
643 modified_benchmarks.append(benchamrk)
qyearsley 2015/03/31 21:29:24 benchamrk -> benchmark
prasadv 2015/04/02 21:30:12 Done.
644 return modified_benchmarks
645
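To make the name extraction above concrete (file name hypothetical):

    # affected_file: 'tools/perf/benchmarks/smoothness.py'
    #   os.path.splitext(affected_file)[0] -> 'tools/perf/benchmarks/smoothness'
    #   os.path.basename(...)              -> 'smoothness'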
646
647 def _ListAvailableBenchmarks(bot_platform):
648 """Gets all available benchmarks names as a list."""
649 browser_type = _GetBrowserType(bot_platform)
650 if os.path.exists(BENCHMARKS_JSON_FILE):
651 os.remove(BENCHMARKS_JSON_FILE)
652 command = []
653 if 'win' in bot_platform:
654 command.append('python')
655 command.append('tools/perf/run_benchmark')
656 command.extend([
657 'list',
658 '--browser',
659 browser_type,
660 '--json-output',
661 BENCHMARKS_JSON_FILE])
qyearsley 2015/03/31 22:21:25 Would it be possible to get the benchmarks list without writing to a JSON file?
prasadv 2015/04/02 21:30:12 I tried using "list" option to list all benchmarks, but its plain output is hard to parse reliably, so I write it to a JSON file instead.
662 try:
663 output, return_code = bisect_utils.RunProcessAndRetrieveOutput(
664 command=command, cwd=SRC_DIR)
665 if return_code:
666 raise RuntimeError('Something went wrong while listing benchmarks. '
667 'Please review the command line: %s.\nERROR: [%s]' %
668 (' '.join(command), output))
669 with open(BENCHMARKS_JSON_FILE) as tests_json:
670 tests_data = json.load(tests_json)
671 if tests_data.get('steps'):
672 return tests_data.get('steps').keys()
673 finally:
674 try:
675 if os.path.exists(BENCHMARKS_JSON_FILE):
676 os.remove(BENCHMARKS_JSON_FILE)
677 except OSError:
678 # Best-effort cleanup; the file may already have been removed.
679 pass
680 return None
681
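Only the top-level 'steps' mapping of the JSON output is consulted above; a minimal sketch of the shape the code expects (the per-benchmark fields are hypothetical, since only the keys are used):

    # {
    #   "steps": {
    #     "sunspider": {"cmd": "...", "device_affinity": 0},
    #     "octane": {"cmd": "...", "device_affinity": 0}
    #   }
    # }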
682
683 def _OutputOverallResults(results):
684 """Creates results step and prints results on buildbot job."""
685 test_status = all(current_value == True for current_value in results.values())
686 bisect_utils.OutputAnnotationStepStart(
687 'Results - %s' % ('Passed' if test_status else 'Failed'))
688 print
689 print 'Results of benchamrks:'
qyearsley 2015/03/31 21:29:24 benchamrks -> benchmarks
prasadv 2015/04/02 21:30:13 Done.
690 print
691 for benchmark, result in results.iteritems():
692 print '%s: %s' % (benchmark, 'Passed' if result else 'Failed')
693 if not test_status:
694 bisect_utils.OutputAnnotationStepFailure()
695 bisect_utils.OutputAnnotationStepClosed()
696 # Returns 0 for success and 1 for failure.
697 return not test_status
qyearsley 2015/03/31 21:29:23 This function will return False for success and True for failure.
prasadv 2015/04/02 21:30:11 Correct, here we are returning the overall exit status.
698
699
700 def _RunBenchmark(bisect_instance, opts, bot_name, benchmark_name):
701 """Runs a telemetry benchmark."""
702 bisect_utils.OutputAnnotationStepStart(benchmark_name)
703 command_to_run = _GuessTestCommand(bot_name, benchmark_name)
704 args = shlex.split(command_to_run, posix=not bisect_utils.IsWindowsHost())
705 output, return_code = bisect_utils.RunProcessAndRetrieveOutput(args, SRC_DIR)
706 # A value other than 0 indicates that the test couldn't be run, and results
707 # should also include an error message.
708 if return_code:
709 print ('Error: Something went wrong running the benchmark: %s. '
710 'Please review the command line: %s\n\n%s' %
711 (benchmark_name, command_to_run, output))
712 bisect_utils.OutputAnnotationStepFailure()
713 print output
714 bisect_utils.OutputAnnotationStepClosed()
715 # return_code holds the exit status of the subprocess that ran the test
716 # command: 0 on success, non-zero otherwise.
717 return return_code == 0
718
719
720 def _RunBenchmarksForCommitQueue(config):
721 """Runs telemetry benchmark for the commit queue."""
722 os.chdir(SRC_DIR)
723 # Determine the bot platform by reading the buildbot builder name from the
724 # environment variable.
725 bot_name = os.environ.get(BUILDBOT_BUILDERNAME)
726 if not bot_name:
727 bot_name = sys.platform
728 bot_name = bot_name.split('_')[0]
729
730 affected_benchmarks = _GetAllAffectedBenchmarks()
731 # Abort if there are no changes to any existing benchmark files.
732 if not affected_benchmarks:
733 bisect_utils.OutputAnnotationStepStart('Results')
734 print
735 print ('There are no modifications to telemetry benchmarks,'
736 ' aborting the try job.')
737 bisect_utils.OutputAnnotationStepClosed()
738 return 0
739
740 # The bisect script expects to be run from the src directory.
741 # Get the options required to create a BisectPerformanceMetrics instance.
742 # Since command is a required arg in BisectPerformanceMetrics, we just create
743 # a dummy command for now.
744 opts = _GetConfigBasedOnPlatform(config, bot_name, test_name='')
745 annotations_dict = _GetStepAnnotationStringsDict(config)
746 b = bisect_perf_regression.BisectPerformanceMetrics(opts, os.getcwd())
747 _RunBuildStepForPerformanceTest(b,
748 annotations_dict.get('build1'),
749 annotations_dict.get('sync1'),
750 None)
751 available_benchmarks = _ListAvailableBenchmarks(bot_name)
752 overall_results = {}
753 for affected_benchmark in affected_benchmarks:
754 for benchmark in available_benchmarks:
755 if (benchmark.startswith(affected_benchmark) and
756 not benchmark.endswith('reference')):
757 overall_results[benchmark] = _RunBenchmark(b, opts, bot_name, benchmark)
758
759 return _OutputOverallResults(overall_results)
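Concretely, the prefix/suffix checks above select every available benchmark whose name starts with a modified module's name, while skipping '.reference' variants; for example (hypothetical names):

    # affected_benchmarks  = ['blink_perf']
    # available_benchmarks = ['blink_perf.css', 'blink_perf.canvas',
    #                         'blink_perf.css.reference']
    # -> runs 'blink_perf.css' and 'blink_perf.canvas' only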
760
761 def convert_json(option, _, value, parser):
qyearsley 2015/03/31 21:29:24 Nit: Should be CamelCase. Could be put inside the function where it's used.
prasadv 2015/04/02 21:30:13 Done.
762 """Provide an OptionParser callback to unmarshal a JSON string."""
763 setattr(parser.values, option.dest, json.loads(value))
764
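As wired up below via the --build-properties option, the callback parses a JSON literal from the command line into a dict; for example (hypothetical invocation):

    # run-bisect-perf-regression.py ... \
    #     --build-properties='{"requester": "commit-bot@chromium.org"}'
    # leaves opts.build_properties == {'requester': 'commit-bot@chromium.org'}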
765
766
569 def _OptionParser(): 767 def _OptionParser():
570 """Returns the options parser for run-bisect-perf-regression.py.""" 768 """Returns the options parser for run-bisect-perf-regression.py."""
571 usage = ('%prog [options] [-- chromium-options]\n' 769 usage = ('%prog [options] [-- chromium-options]\n'
572 'Used by a try bot to run the bisection script using the parameters' 770 'Used by a try bot to run the bisection script using the parameters'
573 ' provided in the auto_bisect/bisect.cfg file.') 771 ' provided in the auto_bisect/bisect.cfg file.')
574 parser = optparse.OptionParser(usage=usage) 772 parser = optparse.OptionParser(usage=usage)
575 parser.add_option('-w', '--working_directory', 773 parser.add_option('-w', '--working_directory',
576 type='str', 774 type='str',
577 help='A working directory to supply to the bisection ' 775 help='A working directory to supply to the bisection '
578 'script, which will use it as the location to checkout ' 776 'script, which will use it as the location to checkout '
(...skipping 10 matching lines...)
589 'as a bisect config first, then a perf config.') 787 'as a bisect config first, then a perf config.')
590 parser.add_option('--extra_src', 788 parser.add_option('--extra_src',
591 type='str', 789 type='str',
592 help='Path to extra source file. If this is supplied, ' 790 help='Path to extra source file. If this is supplied, '
593 'bisect script will use this to override default behavior.') 791 'bisect script will use this to override default behavior.')
594 parser.add_option('--dry_run', 792 parser.add_option('--dry_run',
595 action="store_true", 793 action="store_true",
596 help='The script will perform the full bisect, but ' 794 help='The script will perform the full bisect, but '
597 'without syncing, building, or running the performance ' 795 'without syncing, building, or running the performance '
598 'tests.') 796 'tests.')
797 # This argument is passed by buildbot to supply build properties to the bisect
798 # script. Note: Don't change "--build-properties" property name.
799 parser.add_option('--build-properties', action='callback',
800 dest='build_properties',
801 callback=convert_json, type='string',
802 nargs=1, default={},
803 help='build properties in JSON format')
qyearsley 2015/03/31 21:29:24 Nit: Alignment
prasadv 2015/04/02 21:30:12 Done.
804
599 return parser 805 return parser
600 806
601 807
602 def main(): 808 def main():
603 """Entry point for run-bisect-perf-regression.py. 809 """Entry point for run-bisect-perf-regression.py.
604 810
605 Reads the config file, and then tries to either bisect a regression or 811 Reads the config file, and then tries to either bisect a regression or
606 just run a performance test, depending on the particular config parameters 812 just run a performance test, depending on the particular config parameters
607 specified in the config file. 813 specified in the config file.
608 """ 814 """
(...skipping 30 matching lines...)
639 path_to_perf_cfg = os.path.join( 845 path_to_perf_cfg = os.path.join(
640 os.path.abspath(os.path.dirname(sys.argv[0])), 846 os.path.abspath(os.path.dirname(sys.argv[0])),
641 current_perf_cfg_file) 847 current_perf_cfg_file)
642 848
643 config = _LoadConfigFile(path_to_perf_cfg) 849 config = _LoadConfigFile(path_to_perf_cfg)
644 config_is_valid = _ValidatePerfConfigFile(config) 850 config_is_valid = _ValidatePerfConfigFile(config)
645 851
646 if config and config_is_valid: 852 if config and config_is_valid:
647 return _SetupAndRunPerformanceTest(config, opts.path_to_goma) 853 return _SetupAndRunPerformanceTest(config, opts.path_to_goma)
648 854
855 # If there are no changes to the config file, check whether the request
856 # came from the commit queue; if so, run the modified telemetry benchmarks
857 # for the patch.
858 if opts.build_properties.get('requester') == 'commit-bot@chromium.org':
859 return _SetupAndRunPerformanceTest(
860 config={}, path_to_goma=opts.path_to_goma, cq_tryjob=True)
qyearsley 2015/03/31 21:29:25 How is it determined which benchmark(s) to run?
qyearsley 2015/03/31 22:21:25 Never mind -- Upon reading the code more, I can see how this is determined.
prasadv 2015/04/02 21:30:12 Done.
prasadv 2015/04/02 21:30:12 Done.
861
649 print ('Error: Could not load config file. Double check your changes to ' 862 print ('Error: Could not load config file. Double check your changes to '
650 'auto_bisect/bisect.cfg or run-perf-test.cfg for syntax errors.\n') 863 'auto_bisect/bisect.cfg or run-perf-test.cfg for syntax errors.\n')
651 return 1 864 return 1
652 865
653 866
654 if __name__ == '__main__': 867 if __name__ == '__main__':
655 sys.exit(main()) 868 sys.exit(main())