| Index: tools/perf/perf_tools/image_decoding_benchmark.py
|
| diff --git a/tools/perf/perf_tools/image_decoding_benchmark.py b/tools/perf/perf_tools/image_decoding_benchmark.py
|
| index e23113445debda915ede4c72323cb2e282c990ae..ef85c5feffb18e3eead0bf2649cd20d36eb5b65c 100644
|
| --- a/tools/perf/perf_tools/image_decoding_benchmark.py
|
| +++ b/tools/perf/perf_tools/image_decoding_benchmark.py
|
| @@ -3,20 +3,29 @@
|
| # found in the LICENSE file.
|
|
|
| from telemetry import multi_page_benchmark
|
| -from telemetry import util
|
|
|
|
|
| class ImageDecoding(multi_page_benchmark.MultiPageBenchmark):
|
| - def MeasurePage(self, _, tab, results):
|
| + def WillNavigateToPage(self, page, tab):
|
| + tab.timeline.Start()
|
| +
|
| + def MeasurePage(self, page, tab, results):
|
| + tab.timeline.Stop()
|
| def _IsDone():
|
| return tab.runtime.Evaluate('isDone')
|
|
|
| - with tab.timeline.Recorder(tab.timeline):
|
| - tab.runtime.Execute('runBenchmark()')
|
| - util.WaitFor(_IsDone, 60)
|
| - iterations = tab.runtime.Evaluate('minIterations')
|
| - decode_image = tab.timeline.timeline_events.GetAllOfType('DecodeImage')
|
| - elapsed_times = [d.elapsed_time for d in decode_image[-iterations:]]
|
| + decode_image_events = \
|
| + tab.timeline.timeline_events.GetAllOfType('DecodeImage')
|
| +
|
| + # If this is a real image-decoding benchmark, then keep only the last
|
| + # minIterations decode tasks.
|
| + if (hasattr(page, 'is_image_decoding_benchmark') and
|
| + page.is_image_decoding_benchmark):
|
| + assert _IsDone()
|
| + max_event_count = tab.runtime.Evaluate('minIterations')
|
| + decode_image_events = decode_image_events[-max_event_count:]
|
| +
|
| + elapsed_times = [d.elapsed_time for d in decode_image_events]
|
| if not elapsed_times:
|
| results.Add('ImageDecoding_avg', 'ms', 'unsupported')
|
| return
|
|
|