| Index: tools/telemetry/telemetry/benchmark_runner.py
|
| diff --git a/tools/telemetry/telemetry/benchmark_runner.py b/tools/telemetry/telemetry/benchmark_runner.py
|
| index 22a16df3e27e89388ec50fd30681adcb990eb3e6..348d5f87a4cba06e9882d20c6e2e39b401519cac 100644
|
| --- a/tools/telemetry/telemetry/benchmark_runner.py
|
| +++ b/tools/telemetry/telemetry/benchmark_runner.py
|
| @@ -19,11 +19,44 @@ from telemetry.core import browser_finder
|
| from telemetry.core import browser_options
|
| from telemetry.core import command_line
|
| from telemetry.core import discover
|
| -from telemetry.core import environment
|
| from telemetry.core import util
|
| from telemetry.util import find_dependencies
|
|
|
|
|
| +class Environment(object):
|
| + """Contains information about the benchmark runtime environment.
|
| +
|
| + Attributes:
|
| + top_level_dir: A dir that contains benchmark, page test, and/or user story
|
| + set dirs and associated artifacts.
|
| + benchmark_dirs: A list of dirs containing benchmarks.
|
| + benchmark_aliases: A dict of name:alias string pairs to be matched against
|
| + exactly during benchmark selection.
|
| + """
|
| + def __init__(self, top_level_dir, benchmark_dirs=None,
|
| + benchmark_aliases=None):
|
| + self._top_level_dir = top_level_dir
|
| + self._benchmark_dirs = benchmark_dirs or []
|
| + self._benchmark_aliases = benchmark_aliases or dict()
|
| +
|
| + @property
|
| + def top_level_dir(self):
|
| + return self._top_level_dir
|
| +
|
| + @property
|
| + def benchmark_dirs(self):
|
| + return self._benchmark_dirs
|
| +
|
| + @property
|
| + def benchmark_aliases(self):
|
| + return self._benchmark_aliases
|
| +
|
| +
|
| class Help(command_line.OptparseCommand):
|
| """Display help information about a command"""
|
|
|
| @@ -61,17 +94,17 @@ class List(command_line.OptparseCommand):
|
| return parser
|
|
|
| @classmethod
|
| - def AddCommandLineArgs(cls, parser):
|
| + def AddCommandLineArgs(cls, parser, _):
|
| parser.add_option('-j', '--json-output-file', type='string')
|
| parser.add_option('-n', '--num-shards', type='int', default=1)
|
|
|
| @classmethod
|
| - def ProcessCommandLineArgs(cls, parser, args):
|
| + def ProcessCommandLineArgs(cls, parser, args, environment):
|
| if not args.positional_args:
|
| - args.benchmarks = _Benchmarks()
|
| + args.benchmarks = _Benchmarks(environment)
|
| elif len(args.positional_args) == 1:
|
| args.benchmarks = _MatchBenchmarkName(args.positional_args[0],
|
| - exact_matches=False)
|
| + environment, exact_matches=False)
|
| else:
|
| parser.error('Must provide at most one benchmark name.')
|
|
|
| @@ -105,13 +138,13 @@ class Run(command_line.OptparseCommand):
|
| return parser
|
|
|
| @classmethod
|
| - def AddCommandLineArgs(cls, parser):
|
| + def AddCommandLineArgs(cls, parser, environment):
|
| benchmark.AddCommandLineArgs(parser)
|
|
|
| # Allow benchmarks to add their own command line options.
|
| matching_benchmarks = []
|
| for arg in sys.argv[1:]:
|
| - matching_benchmarks += _MatchBenchmarkName(arg)
|
| + matching_benchmarks += _MatchBenchmarkName(arg, environment)
|
|
|
| if matching_benchmarks:
|
| # TODO(dtu): After move to argparse, add command-line args for all
|
| @@ -123,19 +156,19 @@ class Run(command_line.OptparseCommand):
|
| matching_benchmark.SetArgumentDefaults(parser)
|
|
|
| @classmethod
|
| - def ProcessCommandLineArgs(cls, parser, args):
|
| + def ProcessCommandLineArgs(cls, parser, args, environment):
|
| if not args.positional_args:
|
| possible_browser = (
|
| browser_finder.FindBrowser(args) if args.browser_type else None)
|
| - _PrintBenchmarkList(_Benchmarks(), possible_browser, sys.stderr)
|
| +      _PrintBenchmarkList(_Benchmarks(environment), possible_browser, sys.stderr)
|
| sys.exit(-1)
|
|
|
| input_benchmark_name = args.positional_args[0]
|
| - matching_benchmarks = _MatchBenchmarkName(input_benchmark_name)
|
| + matching_benchmarks = _MatchBenchmarkName(input_benchmark_name, environment)
|
| if not matching_benchmarks:
|
| print >> sys.stderr, 'No benchmark named "%s".' % input_benchmark_name
|
| print >> sys.stderr
|
| - _PrintBenchmarkList(_Benchmarks(), None, sys.stderr)
|
| + _PrintBenchmarkList(_Benchmarks(environment), None, sys.stderr)
|
| sys.exit(-1)
|
|
|
| if len(matching_benchmarks) > 1:
|
| @@ -180,15 +213,16 @@ def _MatchingCommands(string):
|
| if command.Name().startswith(string)]
|
|
|
| @decorators.Cache
|
| -def _Benchmarks():
|
| +def _Benchmarks(environment):
|
| benchmarks = []
|
| - for base_dir in config.base_paths:
|
| - benchmarks += discover.DiscoverClasses(base_dir, base_dir,
|
| + for search_dir in environment.benchmark_dirs:
|
| + benchmarks += discover.DiscoverClasses(search_dir,
|
| + environment.top_level_dir,
|
| benchmark.Benchmark,
|
| index_by_class_name=True).values()
|
| return benchmarks
|
|
|
| -def _MatchBenchmarkName(input_benchmark_name, exact_matches=True):
|
| +def _MatchBenchmarkName(input_benchmark_name, environment, exact_matches=True):
|
| def _Matches(input_string, search_string):
|
| if search_string.startswith(input_string):
|
| return True
|
| @@ -200,18 +234,18 @@ def _MatchBenchmarkName(input_benchmark_name, exact_matches=True):
|
| # Exact matching.
|
| if exact_matches:
|
| # Don't add aliases to search dict, only allow exact matching for them.
|
| - if input_benchmark_name in config.benchmark_aliases:
|
| - exact_match = config.benchmark_aliases[input_benchmark_name]
|
| + if input_benchmark_name in environment.benchmark_aliases:
|
| + exact_match = environment.benchmark_aliases[input_benchmark_name]
|
| else:
|
| exact_match = input_benchmark_name
|
|
|
| - for benchmark_class in _Benchmarks():
|
| + for benchmark_class in _Benchmarks(environment):
|
| if exact_match == benchmark_class.Name():
|
| return [benchmark_class]
|
| return []
|
|
|
| # Fuzzy matching.
|
| - return [benchmark_class for benchmark_class in _Benchmarks()
|
| + return [benchmark_class for benchmark_class in _Benchmarks(environment)
|
| if _Matches(input_benchmark_name, benchmark_class.Name())]
|
|
|
|
|
| @@ -310,10 +344,7 @@ def _PrintBenchmarkList(benchmarks, possible_browser, output_pipe=sys.stdout):
|
| print >> output_pipe
|
|
|
|
|
| -config = environment.Environment([util.GetBaseDir()])
|
| -
|
| -
|
| -def main():
|
| +def main(environment):
|
| # Get the command name from the command line.
|
| if len(sys.argv) > 1 and sys.argv[1] == '--help':
|
| sys.argv[1] = 'help'
|
| @@ -340,10 +371,10 @@ def main():
|
|
|
| # Parse and run the command.
|
| parser = command.CreateParser()
|
| - command.AddCommandLineArgs(parser)
|
| + command.AddCommandLineArgs(parser, environment)
|
| options, args = parser.parse_args()
|
| if commands:
|
| args = args[1:]
|
| options.positional_args = args
|
| - command.ProcessCommandLineArgs(parser, options)
|
| + command.ProcessCommandLineArgs(parser, options, environment)
|
| return command().Run(options)
|
|
|
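
Note: with the module-level "config = environment.Environment([util.GetBaseDir()])" removed, each embedder is now responsible for building the runtime environment and passing it to main(). Below is a minimal sketch of what a calling entry-point script might look like after this patch; the script location, benchmark directory layout, and alias mapping are illustrative assumptions, not part of this change.

    #!/usr/bin/env python
    # Hypothetical entry-point script (not part of this patch). It constructs
    # the Environment introduced here and hands it to benchmark_runner.main(),
    # which now threads it through AddCommandLineArgs, ProcessCommandLineArgs,
    # _Benchmarks, and _MatchBenchmarkName.
    import os
    import sys

    from telemetry import benchmark_runner

    if __name__ == '__main__':
      top_level_dir = os.path.dirname(os.path.abspath(__file__))
      environment = benchmark_runner.Environment(
          top_level_dir=top_level_dir,
          # Assumed layout: benchmarks live in a 'benchmarks' subdirectory.
          benchmark_dirs=[os.path.join(top_level_dir, 'benchmarks')],
          # Illustrative alias: 'smoke' resolves to a benchmark named 'dummy'.
          benchmark_aliases={'smoke': 'dummy'})
      sys.exit(benchmark_runner.main(environment))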