Index: tools/telemetry/telemetry/multi_page_benchmark_runner.py
diff --git a/tools/telemetry/telemetry/multi_page_benchmark_runner.py b/tools/telemetry/telemetry/multi_page_benchmark_runner.py
index 48837eecd6f1c2eb38b596f1e1850a729602bb37..fe814305d0cf7ebb6b46fa6775c2501b9bff78ae 100755
--- a/tools/telemetry/telemetry/multi_page_benchmark_runner.py
+++ b/tools/telemetry/telemetry/multi_page_benchmark_runner.py
@@ -59,22 +59,23 @@ def Main(benchmark_dir):
   # Naively find the benchmark. If we use the browser options parser, we run
   # the risk of failing to parse if we use a benchmark-specific parameter.
-  benchmark_name = None
+  benchmark_names = []
   for arg in sys.argv:
     if arg in benchmarks:
-      benchmark_name = arg
+      benchmark_names.append(arg)
 
   options = browser_options.BrowserOptions()
   parser = options.CreateParser('%prog [options] <benchmark> <page_set>')
 
-  benchmark = None
-  if benchmark_name is not None:
+  selected_benchmarks = []
+  for benchmark_name in benchmark_names:
     benchmark = benchmarks[benchmark_name]()
     benchmark.AddOptions(parser)
+    selected_benchmarks.append(benchmark)
 
   _, args = parser.parse_args()
 
-  if benchmark is None or len(args) != 2:
+  if not selected_benchmarks or len(args) != 1 + len(selected_benchmarks):
     parser.print_usage()
     import page_sets # pylint: disable=F0401
     print >> sys.stderr, 'Available benchmarks:\n%s\n' % ',\n'.join(
         sorted(benchmarks.keys()))
@@ -84,23 +85,27 @@ def Main(benchmark_dir):
                 for f in page_sets.GetAllPageSetFilenames()]))
     sys.exit(1)
 
-  ps = page_set.PageSet.FromFile(args[1])
+  ps = page_set.PageSet.FromFile(args[-1])
 
-  benchmark.CustomizeBrowserOptions(options)
+  for benchmark in selected_benchmarks:
+    benchmark.CustomizeBrowserOptions(options)
   possible_browser = browser_finder.FindBrowser(options)
   if not possible_browser:
     print >> sys.stderr, """No browser found.\n
 Use --browser=list to figure out which are available.\n"""
     sys.exit(1)
 
-  results = multi_page_benchmark.CsvBenchmarkResults(csv.writer(sys.stdout))
-  with page_runner.PageRunner(ps) as runner:
-    runner.Run(options, possible_browser, benchmark, results)
-  # When using an exact executable, assume it is a reference build for the
-  # purpose of outputting the perf results.
-  results.PrintSummary(options.browser_executable and '_ref' or '')
-
-  if len(results.page_failures):
-    logging.warning('Failed pages: %s', '\n'.join(
-        [failure['page'].url for failure in results.page_failures]))
-  return min(255, len(results.page_failures))
+  failures = 0
+  for benchmark in selected_benchmarks:
+    results = multi_page_benchmark.CsvBenchmarkResults(csv.writer(sys.stdout))
+    with page_runner.PageRunner(ps) as runner:
+      runner.Run(options, possible_browser, benchmark, results)
+    # When using an exact executable, assume it is a reference build for the
+    # purpose of outputting the perf results.
+    results.PrintSummary(options.browser_executable and '_ref' or '')
+
+    if results.page_failures:
+      logging.warning('Failed pages: %s', '\n'.join(
+          [failure['page'].url for failure in results.page_failures]))
+    failures += len(results.page_failures)
+  return min(255, failures)
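
Note for reviewers: the patch changes the positional-argument contract to "N benchmark names followed by exactly one page_set" (hence the len(args) != 1 + len(selected_benchmarks) check and args[-1] for the page set), and the exit code becomes the page-failure total summed across all benchmarks, capped at 255. The sketch below only illustrates that contract; KNOWN_BENCHMARKS, split_args and exit_code are hypothetical names, not part of telemetry or of this patch.

# Illustration only -- hypothetical helpers, not part of the patch.
import sys

KNOWN_BENCHMARKS = set(['loading_benchmark', 'scrolling_benchmark'])  # assumed names

def split_args(args):
  """Splits positional args into (benchmark_names, page_set).

  Mirrors the patched rule: the recognized benchmark names plus exactly one
  trailing page_set, i.e. len(args) == len(benchmark_names) + 1; like the
  patch, the last positional argument is taken to be the page set.
  """
  names = [a for a in args if a in KNOWN_BENCHMARKS]
  if not names or len(args) != 1 + len(names):
    raise SystemExit('usage: runner <benchmark> ... <page_set>')
  return names, args[-1]

def exit_code(failure_counts):
  """Mirrors the patched return value: total failures across benchmarks,
  capped at 255 so the value still fits in a one-byte process exit status."""
  return min(255, sum(failure_counts))

if __name__ == '__main__':
  names, page_set = split_args(sys.argv[1:])
  sys.stdout.write('benchmarks: %s; page_set: %s\n'
                   % (', '.join(names), page_set))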