Index: tools/perf/benchmarks/canvasmark.py
diff --git a/tools/perf/benchmarks/canvasmark.py b/tools/perf/benchmarks/canvasmark.py
index 911f76223f59e1762fcc64d6c23aa7cd3b00958a..27912196498bff54312cf13f13acfc4c76b7d10f 100644
--- a/tools/perf/benchmarks/canvasmark.py
+++ b/tools/perf/benchmarks/canvasmark.py
@@ -14,6 +14,7 @@ import os
 from telemetry import benchmark
 from telemetry.page import page_measurement
 from telemetry.page import page_set
+from telemetry.value import scalar


 class _CanvasMarkMeasurement(page_measurement.PageMeasurement):
@@ -40,10 +41,12 @@ class _CanvasMarkMeasurement(page_measurement.PageMeasurement):
         'Unexpected result format "%s"' % score_and_name
       score = int(score_and_name[0])
       name = score_and_name[1][:-1]
-      results.Add(name, 'score', score, data_type='unimportant')
+      results.AddValue(scalar.ScalarValue(
+          results.current_page, name, 'score', score, important=False))
       # Aggregate total score for all tests.
       total += score
-    results.Add('Score', 'score', total)
+    results.AddValue(scalar.ScalarValue(
+        results.current_page, 'Score', 'score', total))


 @benchmark.Disabled
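For context, the change migrates from Telemetry's legacy string-based results API, results.Add(name, units, value, data_type=...), to the typed value system, where each result is wrapped in a scalar.ScalarValue bound to the page that produced it, and important=False replaces the old data_type='unimportant' flag. A minimal sketch of the resulting pattern follows; it assumes the Telemetry tree this patch targets, and the _ReportScores helper and raw_results parameter are hypothetical stand-ins for the benchmark's real parsing loop:

    # Sketch only: _ReportScores and raw_results are hypothetical;
    # results is a Telemetry PageMeasurementResults object.
    from telemetry.value import scalar

    def _ReportScores(results, raw_results):
      total = 0
      for output in raw_results:
        # Each entry looks like '489 [Test1]': score, then test name.
        score_and_name = output.split(' [')
        score = int(score_and_name[0])
        name = score_and_name[1][:-1]  # Strip the trailing ']'.
        # Per-test scores are reported as unimportant scalar values
        # attached to the page currently being measured.
        results.AddValue(scalar.ScalarValue(
            results.current_page, name, 'score', score, important=False))
        total += score
      # The aggregate score is left important (the default).
      results.AddValue(scalar.ScalarValue(
          results.current_page, 'Score', 'score', total))

Binding each value to results.current_page is what lets the typed system aggregate and summarize per-page results, which the flat results.Add call could not express.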