Chromium Code Reviews| Index: tools/auto_bisect/configs/try.py |
| diff --git a/tools/auto_bisect/configs/try.py b/tools/auto_bisect/configs/try.py |
| new file mode 100755 |
| index 0000000000000000000000000000000000000000..aefd8a7510379d7e22f744684fbff079f535c985 |
| --- /dev/null |
| +++ b/tools/auto_bisect/configs/try.py |
| @@ -0,0 +1,124 @@ |
| +#!/usr/bin/env python |
| + |
| +"""Starts bisect try jobs on multiple platforms using known-good configs. |
| + |
| +The purpose of this script is to serve as an integration test for the |
| +auto-bisect project by starting try jobs for various config types and |
| +various platforms. |
| + |
| +The known-good configs are in this same directory as this script. They |
| +are expected to all end in ".cfg" and start with the name of the platform |
| +followed by a dot. |
| + |
| +You can specify --full to try running each config on all applicable bots; |
| +the default behavior is to try each config on only one bot. |
| +""" |
| + |
| +import argparse |
| +import logging |
| +import os |
| +import subprocess |
| +import sys |
| + |
# Directory that contains this script and the known-good config files.
SCRIPT_DIR = os.path.dirname(__file__)
# Destination paths: the config files that the bisect/perf-test try jobs read.
BISECT_CONFIG = os.path.join(SCRIPT_DIR, os.path.pardir, 'bisect.cfg')
PERF_TEST_CONFIG = os.path.join(
    SCRIPT_DIR, os.path.pardir, os.path.pardir, 'run-perf-test.cfg')
# Maps a platform name (the config filename prefix, per the module docstring)
# to the try bots that can run configs for that platform.
PLATFORM_BOT_MAP = {
    'linux': ['linux_perf_bot'],
    'mac': ['mac_perf_bisect', 'mac_10_9_perf_bisect'],
    'win': ['win_perf_bisect', 'win_8_perf_bisect', 'win_xp_perf_bisect'],
    'android': [
        'android_nexus4_perf_bisect',
        'android_nexus5_perf_bisect',
        'android_nexus7_perf_bisect',
        'android_nexus10_perf_bisect',
    ],
}
# SVN repository that "git try" sends the try jobs to.
SVN_URL = 'svn://svn.chromium.org/chrome-try/try-perf'
# Message for the temporary commit that carries the config change.
COMMIT_MESSAGE = 'Automatic commit.'
| + |
| + |
def main(argv):
  """Parses arguments and starts try jobs for the known-good configs.

  Args:
    argv: Command-line arguments, including the program name.

  Returns:
    0 on success, 1 if starting a try job failed.
  """
  parser = argparse.ArgumentParser(description=__doc__)
  parser.add_argument('--full', action='store_true',
                      help='Run each config on all applicable bots.')
  parser.add_argument('--filter', help='Filter config filenames to use.')
  parser.add_argument('--verbose', '-v', action='store_true')
  args = parser.parse_args(argv[1:])
  _SetupLogging(args.verbose)
  source_configs = _SourceConfigs(args.filter)
  logging.debug('Source configs: %s', source_configs)
  try:
    _StartTryJobs(source_configs, args.full)
  except subprocess.CalledProcessError as error:
    # Show both the failed command and its captured output, and exit
    # non-zero so that callers can detect the failure.
    print(str(error))
    print(error.output)
    return 1
  return 0
| + |
| + |
| +def _SetupLogging(verbose): |
| + level = logging.INFO |
| + if verbose: |
| + level = logging.DEBUG |
| + logging.basicConfig(level=level) |
| + |
| + |
def _SourceConfigs(name_filter):
  """Returns paths of the known-good .cfg files next to this script.

  Args:
    name_filter: If non-empty, only paths containing this substring
        are returned.
  """
  paths = [os.path.join(SCRIPT_DIR, f) for f in os.listdir(SCRIPT_DIR)]
  config_paths = [p for p in paths if p.endswith('.cfg')]
  if not name_filter:
    return config_paths
  return [p for p in config_paths if name_filter in p]
| + |
| + |
def _StartTryJobs(source_configs, full_mode):
  """Starts try jobs for each config on its applicable bot(s).

  Args:
    source_configs: Paths of known-good config files to try.
    full_mode: Whether to try each config on all applicable bots
        instead of only the first one.
  """
  for config in source_configs:
    destination = _DestConfig(config)
    for bot in _BotNames(config, full_mode):
      logging.info('Trying %s on %s.', config, bot)
      _StartTry(config, destination, bot)
| + |
| + |
| +def _DestConfig(source_config): |
| + if 'bisect' in source_config: |
| + return BISECT_CONFIG |
| + assert 'perf_test' in source_config, source_config |
| + return PERF_TEST_CONFIG |
| + |
| + |
def _BotNames(source_config, full_mode):
  """Returns the try bot name(s) that apply to a config file.

  The platform is taken from the config filename, which starts with the
  platform name followed by a dot.

  Args:
    source_config: Path of a known-good config file.
    full_mode: If True, return all applicable bots; otherwise just one.
  """
  platform = os.path.basename(source_config).split('.')[0]
  assert platform in PLATFORM_BOT_MAP, platform
  applicable_bots = PLATFORM_BOT_MAP[platform]
  return applicable_bots if full_mode else applicable_bots[:1]
| + |
| + |
def _StartTry(source_config, dest_config, bot_name):
  """Starts one try job by committing a config change and running "git try".

  Copies |source_config| over |dest_config|, commits the change, sends a
  try job to |bot_name|, then reverts the temporary commit.

  Args:
    source_config: Path of the known-good config file to try.
    dest_config: Path where the try bots expect the config to be.
    bot_name: Name of the try bot to run the job on.

  Raises:
    subprocess.CalledProcessError: If any of the commands fails.
  """
  assert os.path.exists(source_config)
  assert os.path.exists(dest_config)
  # Guard against running on top of a previous, un-reverted automatic commit.
  assert _LastCommitMessage() != COMMIT_MESSAGE, repr(_LastCommitMessage())
  _Run(['cp', source_config, dest_config])
  _Run(['git', 'commit', '--all', '-m', COMMIT_MESSAGE])
  # Print the "git try" output so the user sees the try job status; _Run
  # only returns the output, it does not send it to stdout.
  print(_Run(['git', 'try', '--svn_repo', SVN_URL, '--bot', bot_name]))
  assert _LastCommitMessage() == COMMIT_MESSAGE, repr(_LastCommitMessage())
  # Revert only the temporary automatic commit made above, restoring the
  # checkout to its previous state.
  _Run(['git', 'reset', '--hard', 'HEAD~1'])
|
sullivan
2014/10/21 14:27:42
I'm not very good with git -- what would this do from the user's local checkout?
Sergiy Byelozyorov
2014/10/21 19:04:37
It will only revert the last commit, which contains the changes made by this script.
qyearsley
2014/10/21 19:06:26
Yep, that's the intention. The "git reset --hard HEAD~1" reverts the automatic commit.
|
| + |
| + |
def _LastCommitMessage():
  """Returns the subject line of the most recent git commit."""
  subject = _Run(['git', 'log', '--format=%s', '-1'])
  return subject.strip()
| + |
| + |
| +def _Run(command): |
| + logging.debug('Running %s', command) |
| + # Note: check_output will raise a subprocess.CalledProcessError when |
| + # the return-code is non-zero. |
| + return subprocess.check_output(command) |
| + |
| + |
if __name__ == '__main__':
  # main's return value becomes the process exit status.
  sys.exit(main(sys.argv))