Chromium Code Reviews| Index: tools/run-bisect-perf-regression.py |
| diff --git a/tools/run-bisect-perf-regression.py b/tools/run-bisect-perf-regression.py |
| index 0181ef73c136bfd5fedddc7fdfd6612518791915..9d0a767f0b77983254db8cfc22704f798bf1c046 100755 |
| --- a/tools/run-bisect-perf-regression.py |
| +++ b/tools/run-bisect-perf-regression.py |
| @@ -19,26 +19,31 @@ import subprocess |
| import sys |
| import traceback |
| +import bisect_utils |
| +bisect = imp.load_source('bisect-perf-regression', |
| + os.path.join(os.path.abspath(os.path.dirname(sys.argv[0])), |
| + 'bisect-perf-regression.py')) |
| + |
| + |
| CROS_BOARD_ENV = 'BISECT_CROS_BOARD' |
| CROS_IP_ENV = 'BISECT_CROS_IP' |
| -def LoadConfigFile(path_to_file): |
| - """Attempts to load the file 'run-bisect-perf-regression.cfg' as a module |
| +def _LoadConfigFile(path_to_file): |
| + """Attempts to load the specified config file as a module |
| and grab the global config dict. |
| Args: |
| - path_to_file: Path to the run-bisect-perf-regression.cfg file. |
| + path_to_file: Path to the file. |
| Returns: |
| The config dict which should be formatted as follows: |
| {'command': string, 'good_revision': string, 'bad_revision': string |
| - 'metric': string}. |
| + 'metric': string, etc...}. |
| Returns None on failure. |
| """ |
| try: |
| local_vars = {} |
| - execfile(os.path.join(path_to_file, 'run-bisect-perf-regression.cfg'), |
| - local_vars) |
| + execfile(path_to_file, local_vars) |
| return local_vars['config'] |
| except: |
| @@ -48,7 +53,195 @@ def LoadConfigFile(path_to_file): |
| return None |
| -def RunBisectionScript(config, working_directory, path_to_file, path_to_goma): |
| +def _GetGOMAExecutable(path_to_goma): |
|
tonyg
2013/10/17 01:01:32
I like the refactor so far, but you've got an easy
shatch
2013/10/17 21:59:08
Neat, didn't know about that. Planning on making a
|
| + if os.name == 'nt': |
| + return os.path.join(path_to_goma, 'goma_ctl.bat') |
| + else: |
| + return os.path.join(path_to_goma, 'goma_ctl.sh') |
| + |
| + |
def _SetupAndStartGOMA(path_to_goma):
  """Sets up the goma build environment and launches the goma daemon.

  Args:
    path_to_goma: Path to goma directory.

  Returns:
    True if goma started successfully.
  """
  abs_path_to_goma = os.path.abspath(path_to_goma)
  goma_file = _GetGOMAExecutable(abs_path_to_goma)

  if os.name == 'nt':
    # On Windows the compiler wrapper is set explicitly through CC/CXX.
    gomacc = os.path.join(abs_path_to_goma, 'gomacc.exe')
    os.environ['CC'] = gomacc + ' cl.exe'
    os.environ['CXX'] = gomacc + ' cl.exe'
  else:
    # Elsewhere, prepending the goma directory to PATH is sufficient.
    os.environ['PATH'] = os.pathsep.join([abs_path_to_goma, os.environ['PATH']])

  # Sometimes goma is lingering around if something went bad on a previous
  # run. Stop it before starting a new process. The return code can be
  # ignored since 'stop' reports an error when goma wasn't running.
  _StopGOMA(path_to_goma)

  return subprocess.call([goma_file, 'start']) == 0
| + |
| + |
def _StopGOMA(path_to_goma):
  """Stops the goma daemon; harmless to call when goma isn't running."""
  goma_file = _GetGOMAExecutable(os.path.abspath(path_to_goma))
  subprocess.call([goma_file, 'stop'])
| + |
| + |
def _OutputFailedResults(text_to_print):
  """Prints the given failure text inside a 'Results - Failed' annotation."""
  bisect_utils.OutputAnnotationStepStart('Results - Failed')
  print(text_to_print)
  bisect_utils.OutputAnnotationStepClosed()
| + |
| + |
def _CreateBisectOptionsFromConfig(config):
  """Builds a bisect.BisectOptions instance from a loaded config dict.

  Args:
    config: Dict of config values ('command', 'metric', etc.) as read from
        a .cfg file by _LoadConfigFile.

  Returns:
    A bisect.BisectOptions created from the dict.

  Raises:
    RuntimeError: A cros build was requested but the BISECT_CROS_BOARD or
        BISECT_CROS_IP environment variables are not set.
  """
  opts_dict = {
      'command': config['command'],
      'metric': config['metric'],
      # These scripts run on buildbot, so hard-code the build system and
      # annotation output accordingly.
      'build_preference': 'ninja',
      'output_buildbot_annotations': True,
  }

  # Optional numeric settings: forward only when present and non-empty.
  # (config.get avoids a KeyError when a key is absent from the cfg file.)
  if config.get('repeat_count'):
    opts_dict['repeat_test_count'] = int(config['repeat_count'])

  if config.get('truncate_percent'):
    opts_dict['truncate_percent'] = int(config['truncate_percent'])

  if config.get('max_time_minutes'):
    opts_dict['max_time_minutes'] = int(config['max_time_minutes'])

  if 'use_goma' in config:
    opts_dict['use_goma'] = config['use_goma']

  if '--browser=cros' in config['command']:
    opts_dict['target_platform'] = 'cros'

    # os.environ.get (rather than os.environ[...]) is required here:
    # indexing would raise a KeyError for an undefined variable, making the
    # descriptive RuntimeError below unreachable.
    if os.environ.get(CROS_BOARD_ENV) and os.environ.get(CROS_IP_ENV):
      opts_dict['cros_board'] = os.environ[CROS_BOARD_ENV]
      opts_dict['cros_remote_ip'] = os.environ[CROS_IP_ENV]
    else:
      raise RuntimeError('Cros build selected, but BISECT_CROS_IP or '
                         'BISECT_CROS_BOARD undefined.')
  elif 'android' in config['command']:
    opts_dict['target_platform'] = 'android'

  return bisect.BisectOptions.FromDict(opts_dict)
| + |
| + |
def _RunPerformancetest(config, path_to_file):
  """Builds and runs the performance test with and without the current patch.

  Builds the patched revision and runs the perf test, reverts the patch,
  builds and runs again, then prints a comparison of the two result sets as
  buildbot annotations.

  Args:
    config: The config read from run-perf-test.cfg.
    path_to_file: Path to the directory containing this script.

  Raises:
    RuntimeError: A build, gclient, or test step failed.
  """
  # Bisect script expects to be run from src.
  os.chdir(os.path.join(path_to_file, '..'))

  bisect_utils.OutputAnnotationStepStart('Building With Patch')

  opts = _CreateBisectOptionsFromConfig(config)
  b = bisect.BisectPerformanceMetrics(None, opts)

  if bisect_utils.RunGClient(['runhooks']):
    raise RuntimeError('Failed to run gclient runhooks')

  if not b.BuildCurrentRevision('chromium'):
    raise RuntimeError('Patched version failed to build.')

  bisect_utils.OutputAnnotationStepClosed()
  bisect_utils.OutputAnnotationStepStart('Running With Patch')

  results_with_patch = b.RunPerformanceTestAndParseResults(
      opts.command, opts.metric, reset_on_first_run=True)

  # results tuple: index 1 is the failure flag, index 2 the raw output.
  if results_with_patch[1]:
    raise RuntimeError('Patched version failed to run performance test.')

  bisect_utils.OutputAnnotationStepClosed()

  bisect_utils.OutputAnnotationStepStart('Reverting Patch')
  if bisect_utils.RunGClient(['revert']):
    # This step runs 'gclient revert', so report that (the message
    # previously claimed 'runhooks' had failed).
    raise RuntimeError('Failed to run gclient revert')
  bisect_utils.OutputAnnotationStepClosed()

  bisect_utils.OutputAnnotationStepStart('Building Without Patch')

  if bisect_utils.RunGClient(['runhooks']):
    raise RuntimeError('Failed to run gclient runhooks')

  if not b.BuildCurrentRevision('chromium'):
    raise RuntimeError('Unpatched version failed to build.')

  bisect_utils.OutputAnnotationStepClosed()
  bisect_utils.OutputAnnotationStepStart('Running Without Patch')

  results_without_patch = b.RunPerformanceTestAndParseResults(
      opts.command, opts.metric, upload_on_last_run=True)

  if results_without_patch[1]:
    raise RuntimeError('Unpatched version failed to run performance test.')

  # Find the link to the cloud stored results file in the test output.
  output = results_without_patch[2]
  cloud_links = [t for t in output.splitlines() if 'View online at' in t]
  cloud_file_link = cloud_links[0] if cloud_links else ''

  bisect_utils.OutputAnnotationStepClosed()
  if cloud_file_link:
    bisect_utils.OutputAnnotationStepStart('Results - %s' % cloud_file_link)
  else:
    bisect_utils.OutputAnnotationStepStart('Results')

  # Print a fixed-width table comparing the patched and unpatched runs.
  print(' %s %s %s' % (''.center(10, ' '), 'Mean'.center(20, ' '),
                       'Std. Error'.center(20, ' ')))
  print(' %s %s %s' % ('Patch'.center(10, ' '),
      ('%.02f' % results_with_patch[0]['mean']).center(20, ' '),
      ('%.02f' % results_with_patch[0]['std_err']).center(20, ' ')))
  print(' %s %s %s' % ('No Patch'.center(10, ' '),
      ('%.02f' % results_without_patch[0]['mean']).center(20, ' '),
      ('%.02f' % results_without_patch[0]['std_err']).center(20, ' ')))
  print(cloud_file_link)
  bisect_utils.OutputAnnotationStepClosed()
| + |
| + |
def _SetupAndRunPerformanceTest(config, path_to_file, path_to_goma):
  """Attempts to build and run the current revision with and without the
  current patch, with the parameters passed in.

  Args:
    config: The config read from run-perf-test.cfg.
    path_to_file: Path to the bisect-perf-regression.py script.
    path_to_goma: Path to goma directory.

  Returns:
    0 on success, otherwise 1.
  """
  if path_to_goma:
    config['use_goma'] = True
    if not _SetupAndStartGOMA(path_to_goma):
      _OutputFailedResults('Error: goma failed to start.')
      return 1

  # _RunPerformancetest changes the working directory; restore it afterwards.
  cwd = os.getcwd()
  try:
    _RunPerformancetest(config, path_to_file)
    return 0
  except RuntimeError as e:
    # Close whatever annotation step the failing stage left open before
    # reporting the failure. str(e) replaces the deprecated e.message
    # (removed in Python 3); they are equivalent for single-arg exceptions.
    bisect_utils.OutputAnnotationStepClosed()
    _OutputFailedResults('Error: %s' % e)
    return 1
  finally:
    os.chdir(cwd)
    if path_to_goma:
      _StopGOMA(path_to_goma)
| + |
| + |
| +def _RunBisectionScript(config, working_directory, path_to_file, path_to_goma): |
| """Attempts to execute src/tools/bisect-perf-regression.py with the parameters |
| passed in. |
| @@ -98,37 +291,20 @@ def RunBisectionScript(config, working_directory, path_to_file, path_to_goma): |
| if 'android' in config['command']: |
| cmd.extend(['--target_platform', 'android']) |
| - goma_file = '' |
| if path_to_goma: |
| - path_to_goma = os.path.abspath(path_to_goma) |
| - |
| - if os.name == 'nt': |
| - os.environ['CC'] = os.path.join(path_to_goma, 'gomacc.exe') + ' cl.exe' |
| - os.environ['CXX'] = os.path.join(path_to_goma, 'gomacc.exe') + ' cl.exe' |
| - goma_file = os.path.join(path_to_goma, 'goma_ctl.bat') |
| - else: |
| - os.environ['PATH'] = os.pathsep.join([path_to_goma, os.environ['PATH']]) |
| - goma_file = os.path.join(path_to_goma, 'goma_ctl.sh') |
| - |
| - cmd.append('--use_goma') |
| - |
| - # Sometimes goma is lingering around if something went bad on a previous |
| - # run. Stop it before starting a new process. Can ignore the return code |
| - # since it will return an error if it wasn't running. |
| - subprocess.call([goma_file, 'stop']) |
| - |
| - return_code = subprocess.call([goma_file, 'start']) |
| - if return_code: |
| + if not _SetupAndStartGOMA(path_to_goma): |
| print 'Error: goma failed to start.' |
| - return return_code |
| + return 1 |
| + |
| + cmd.append('--use_goma') |
| cmd = [str(c) for c in cmd] |
| return_code = subprocess.call(cmd) |
| if path_to_goma: |
| - subprocess.call([goma_file, 'stop']) |
| + _StopGOMA(path_to_goma) |
| if return_code: |
| print 'Error: bisect-perf-regression.py returned with error %d' %\ |
| @@ -156,23 +332,38 @@ def main(): |
| 'builds will be enabled.') |
| (opts, args) = parser.parse_args() |
| - if not opts.working_directory: |
| - print 'Error: missing required parameter: --working_directory' |
| - parser.print_help() |
| - return 1 |
| + path_to_current_directory = os.path.abspath(os.path.dirname(sys.argv[0])) |
| + path_to_bisect_cfg = os.path.join(path_to_current_directory, |
| + 'run-bisect-perf-regression.cfg') |
| - path_to_file = os.path.abspath(os.path.dirname(sys.argv[0])) |
| + config = _LoadConfigFile(path_to_bisect_cfg) |
| - config = LoadConfigFile(path_to_file) |
| - if not config: |
| - print 'Error: Could not load config file. Double check your changes to '\ |
| - 'run-bisect-perf-regression.cfg for syntax errors.' |
| - return 1 |
| + # Check if the config is empty |
| + config_has_values = [v for v in config.values() if v] |
| + |
| + if config and config_has_values: |
| + if not opts.working_directory: |
| + print 'Error: missing required parameter: --working_directory' |
| + parser.print_help() |
| + return 1 |
| - return RunBisectionScript(config, opts.working_directory, path_to_file, |
| - opts.path_to_goma) |
| + return _RunBisectionScript(config, opts.working_directory, |
| + path_to_current_directory, opts.path_to_goma) |
| + else: |
| + path_to_perf_cfg = os.path.join( |
| + os.path.abspath(os.path.dirname(sys.argv[0])), 'run-perf-test.cfg') |
| + |
| + config = _LoadConfigFile(path_to_perf_cfg) |
| + |
| + if config: |
| + return _SetupAndRunPerformanceTest(config, path_to_current_directory, |
| + opts.path_to_goma) |
| + else: |
| + print 'Error: Could not load config file. Double check your changes to '\ |
| + 'run-bisect-perf-regression.cfg for syntax errors.' |
| + return 1 |
| if __name__ == '__main__': |