| Index: tools/telemetry/telemetry/benchmark_runner.py
|
| diff --git a/tools/telemetry/telemetry/benchmark_runner.py b/tools/telemetry/telemetry/benchmark_runner.py
|
| index 13a2fc20f6b2eb7c0bcef07e44dc6165f07dd351..7c643a8e4be767fcd4eba35c54561ec6b7ed65cb 100644
|
| --- a/tools/telemetry/telemetry/benchmark_runner.py
|
| +++ b/tools/telemetry/telemetry/benchmark_runner.py
|
| @@ -13,7 +13,7 @@ import json
|
| import os
|
| import sys
|
|
|
| -from telemetry import benchmark
|
| +from telemetry import benchmark as benchmark_module
|
| from telemetry import decorators
|
| from telemetry.core import browser_finder
|
| from telemetry.core import browser_options
|
| @@ -40,9 +40,10 @@ class Help(command_line.OptparseCommand):
|
|
|
| def Run(self, args):
|
| if len(args.positional_args) == 1:
|
| - commands = _MatchingCommands(args.positional_args[0])
|
| - if len(commands) == 1:
|
| - command = commands[0]
|
| + command_classes = _MatchingCommands(args.positional_args[0])
|
| + if len(command_classes) == 1:
|
| + command_class = command_classes[0]
|
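| + # Commands are instantiated now; Run keeps its benchmark on the instance.
|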
| + command = command_class()
|
| parser = command.CreateParser()
|
| command.AddCommandLineArgs(parser)
|
| parser.print_help()
|
| @@ -107,37 +108,12 @@ class Run(command_line.OptparseCommand):
|
|
|
| usage = 'benchmark_name [page_set] [<options>]'
|
|
|
| - @classmethod
|
| - def CreateParser(cls):
|
| - options = browser_options.BrowserFinderOptions()
|
| - parser = options.CreateParser('%%prog %s %s' % (cls.Name(), cls.usage))
|
| - return parser
|
| -
|
| - @classmethod
|
| - def AddCommandLineArgs(cls, parser):
|
| - benchmark.AddCommandLineArgs(parser)
|
| -
|
| - # Allow benchmarks to add their own command line options.
|
| - matching_benchmarks = []
|
| - for arg in sys.argv[1:]:
|
| - matching_benchmarks += _MatchBenchmarkName(arg)
|
| -
|
| - if matching_benchmarks:
|
| - # TODO(dtu): After move to argparse, add command-line args for all
|
| - # benchmarks to subparser. Using subparsers will avoid duplicate
|
| - # arguments.
|
| - matching_benchmark = matching_benchmarks.pop()
|
| - matching_benchmark.AddCommandLineArgs(parser)
|
| - # The benchmark's options override the defaults!
|
| - matching_benchmark.SetArgumentDefaults(parser)
|
| + def __init__(self, input_benchmark_name=None):
|
| + super(Run, self).__init__()
|
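| + # With no benchmark name (e.g. when constructed by Help), only global options apply.
|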
| + if input_benchmark_name is None:
|
| + self._benchmark = None
|
| + return
|
|
|
| - @classmethod
|
| - def ProcessCommandLineArgs(cls, parser, args):
|
| - if not args.positional_args:
|
| - _PrintBenchmarkList(_Benchmarks())
|
| - sys.exit(-1)
|
| -
|
| - input_benchmark_name = args.positional_args[0]
|
| matching_benchmarks = _MatchBenchmarkName(input_benchmark_name)
|
| if not matching_benchmarks:
|
| print >> sys.stderr, 'No benchmark named "%s".' % input_benchmark_name
|
| @@ -154,19 +130,31 @@ class Run(command_line.OptparseCommand):
|
| sys.exit(-1)
|
|
|
| benchmark_class = matching_benchmarks.pop()
|
| - if len(args.positional_args) > 1:
|
| - parser.error('Too many arguments.')
|
|
|
| - assert issubclass(benchmark_class, benchmark.Benchmark), (
|
| + assert issubclass(benchmark_class, benchmark_module.Benchmark), (
|
| 'Trying to run a non-Benchmark?!')
|
|
|
| - benchmark.ProcessCommandLineArgs(parser, args)
|
| - benchmark_class.ProcessCommandLineArgs(parser, args)
|
| + self._benchmark = benchmark_class()
|
|
|
| - cls._benchmark = benchmark_class
|
| + @classmethod
|
| + def CreateParser(cls):
|
| + options = browser_options.BrowserFinderOptions()
|
| + parser = options.CreateParser('%%prog %s %s' % (cls.Name(), cls.usage))
|
| + return parser
|
| +
|
| + def AddCommandLineArgs(self, parser):
|
| + benchmark_module.AddCommandLineArgs(parser)
|
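| + # Allow the selected benchmark to add its own options; its defaults override the parser's.
|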
| + if self._benchmark is not None:
|
| + self._benchmark.AddCommandLineArgs(parser)
|
| + self._benchmark.SetArgumentDefaults(parser)
|
| +
|
| + def ProcessCommandLineArgs(self, parser, args):
|
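| + # Validate global options first, then any benchmark-specific ones.
|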
| + benchmark_module.ProcessCommandLineArgs(parser, args)
|
| + if self._benchmark is not None:
|
| + self._benchmark.ProcessCommandLineArgs(parser, args)
|
|
|
| def Run(self, args):
|
| - return min(255, self._benchmark().Run(args))
|
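| + # Clamp the benchmark's return code to 255, the largest value a shell exit status can carry.
|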
| + return min(255, self._benchmark.Run(args))
|
|
|
|
|
| def _ScriptName():
|
| @@ -192,7 +180,7 @@ def _Benchmarks():
|
| print config
|
| for base_dir in config.base_paths:
|
| benchmarks += discover.DiscoverClasses(base_dir, base_dir,
|
| - benchmark.Benchmark,
|
| + benchmark_module.Benchmark,
|
| index_by_class_name=True).values()
|
| return benchmarks
|
|
|
| @@ -246,7 +234,7 @@ def _GetJsonBenchmarkList(possible_browser, possible_reference_browser,
|
| }
|
| }
|
| for benchmark_class in benchmark_classes:
|
| - if not issubclass(benchmark_class, benchmark.Benchmark):
|
| + if not issubclass(benchmark_class, benchmark_module.Benchmark):
|
| continue
|
| if not decorators.IsEnabled(benchmark_class, possible_browser):
|
| continue
|
| @@ -289,7 +277,7 @@ def _PrintBenchmarkList(benchmarks):
|
| format_string = ' %%-%ds %%s' % max(len(b.Name()) for b in benchmarks)
|
|
|
| filtered_benchmarks = [benchmark_class for benchmark_class in benchmarks
|
| - if issubclass(benchmark_class, benchmark.Benchmark)]
|
| + if issubclass(benchmark_class, benchmark_module.Benchmark)]
|
| if filtered_benchmarks:
|
| print >> sys.stderr, 'Available benchmarks are:'
|
| for benchmark_class in sorted(filtered_benchmarks, key=lambda b: b.Name()):
|
| @@ -302,36 +290,53 @@ config = environment.Environment([util.GetBaseDir()])
|
|
|
|
|
| def main():
|
| # Get the command name from the command line.
|
| if len(sys.argv) > 1 and sys.argv[1] == '--help':
|
| sys.argv[1] = 'help'
|
|
|
| - command_name = 'run'
|
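| + # Collect the positional (non-flag) arguments from the command line.
|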
| + args = []
|
| for arg in sys.argv[1:]:
|
| if not arg.startswith('-'):
|
| - command_name = arg
|
| - break
|
| + args.append(arg)
|
| +
|
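| + # The first positional argument names the command; with none, list benchmarks and exit.
|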
| + if args:
|
| + command_name = args[0]
|
| + else:
|
| + _PrintBenchmarkList(_Benchmarks())
|
| + sys.exit(-1)
|
|
|
| # Validate and interpret the command name.
|
| - commands = _MatchingCommands(command_name)
|
| - if len(commands) > 1:
|
| + command_classes = _MatchingCommands(command_name)
|
| + if len(command_classes) > 1:
|
| print >> sys.stderr, ('"%s" is not a %s command. Did you mean one of these?'
|
| % (command_name, _ScriptName()))
|
| - for command in commands:
|
| + for command_class in command_classes:
|
| print >> sys.stderr, ' %-10s %s' % (
|
| - command.Name(), command.Description())
|
| + command_class.Name(), command_class.Description())
|
| return 1
|
| - if commands:
|
| - command = commands[0]
|
| +
|
| + command_class = Run
|
| + if command_classes:
|
| + command_class = command_classes[0]
|
| +
|
| + if command_class == Run:
|
| + # Drop the command name, if one matched, so only the benchmark args remain.
|
| + benchmark_args = args[1:] if command_classes else args
|
| + if not benchmark_args:
|
| + _PrintBenchmarkList(_Benchmarks())
|
| + sys.exit(-1)
|
| + if len(benchmark_args) > 1:
|
| + print >> sys.stderr, 'Too many arguments.'
|
| + sys.exit(-1)
|
| + command = command_class(benchmark_args[0])
|
| else:
|
| - command = Run
|
| + command = command_class()
|
|
|
| # Parse and run the command.
|
| parser = command.CreateParser()
|
| command.AddCommandLineArgs(parser)
|
| options, args = parser.parse_args()
|
| - if commands:
|
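| + # The parsed positionals still include the command name when one matched; drop it.
|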
| + if command_classes:
|
| args = args[1:]
|
| options.positional_args = args
|
| command.ProcessCommandLineArgs(parser, options)
|
| - return command().Run(options)
|
| + return command.Run(options)
|
|
|