Index: tools/telemetry/telemetry/multi_page_benchmark_runner.py
diff --git a/tools/telemetry/telemetry/multi_page_benchmark_runner.py b/tools/telemetry/telemetry/multi_page_benchmark_runner.py
index 89fc5e5c1b8f6bbdb37ff0d2bcdee282eac69b96..efb4fd9c55d3f684fa98580c7194c13d9d6e4c8a 100755
--- a/tools/telemetry/telemetry/multi_page_benchmark_runner.py
+++ b/tools/telemetry/telemetry/multi_page_benchmark_runner.py
@@ -61,8 +61,10 @@ Use --browser=list to figure out which are available.\n"""
     sys.exit(1)
 
   results = multi_page_benchmark.CsvBenchmarkResults(csv.writer(sys.stdout))
-  with page_runner.PageRunner(ps) as runner:
-    runner.Run(options, possible_browser, benchmark, results)
+  with possible_browser.CreatePlatformHarness():
+    with page_runner.PageRunner(ps) as runner:
+      runner.Run(options, possible_browser, benchmark, results)
+
   # When using an exact executable, assume it is a reference build for the
   # purpose of outputting the perf results.
   results.PrintSummary(options.browser_executable and '_ref' or '')
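The patch wraps the existing PageRunner block in a platform harness obtained from possible_browser.CreatePlatformHarness(), so platform-level setup and teardown bracket the whole page run. The sketch below illustrates how such a context-manager-style harness could be structured; the class name, helper methods, and bodies are assumptions for illustration only and are not part of this patch.

import contextlib


class PossibleBrowserSketch(object):
  """Hypothetical stand-in for the real PossibleBrowser class."""

  @contextlib.contextmanager
  def CreatePlatformHarness(self):
    # Assumed setup step; the diff only shows that the return value is used
    # in a `with` statement around the page run.
    self._SetUpPlatform()
    try:
      yield  # the nested PageRunner block executes here
    finally:
      # Teardown runs even if the benchmark raises, restoring platform state.
      self._TearDownPlatform()

  def _SetUpPlatform(self):
    pass  # placeholder for platform configuration

  def _TearDownPlatform(self):
    pass  # placeholder for cleanup

Because CreatePlatformHarness() is entered before PageRunner and exited after it, any exception raised during runner.Run() still triggers the harness teardown, which is the main benefit of nesting the two `with` blocks as the patch does.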