OLD | NEW |
1 # Copyright 2013 The Chromium Authors. All rights reserved. | 1 # Copyright 2013 The Chromium Authors. All rights reserved. |
2 # Use of this source code is governed by a BSD-style license that can be | 2 # Use of this source code is governed by a BSD-style license that can be |
3 # found in the LICENSE file. | 3 # found in the LICENSE file. |
4 | 4 |
5 import json | 5 import json |
6 import math | 6 import math |
7 import os | 7 import os |
8 | 8 |
9 from telemetry import benchmark | 9 from telemetry import benchmark |
10 from telemetry import page as page_module | 10 from telemetry import page as page_module |
11 from telemetry.core import util | 11 from telemetry.core import util |
12 from telemetry.page import page_set | 12 from telemetry.page import page_set |
13 from telemetry.page import page_test | 13 from telemetry.page import page_test |
| 14 from telemetry.value import improvement_direction |
14 from telemetry.value import merge_values | 15 from telemetry.value import merge_values |
15 from telemetry.value import scalar | 16 from telemetry.value import scalar |
16 | 17 |
17 | 18 |
18 def _GeometricMean(values): | 19 def _GeometricMean(values): |
19 """Compute a rounded geometric mean from an array of values.""" | 20 """Compute a rounded geometric mean from an array of values.""" |
20 if not values: | 21 if not values: |
21 return None | 22 return None |
22 # To avoid infinite value errors, make sure no value is less than 0.001. | 23 # To avoid infinite value errors, make sure no value is less than 0.001. |
23 new_values = [] | 24 new_values = [] |
(...skipping 24 matching lines...) Expand all Loading... |
48 def _IsDone(): | 49 def _IsDone(): |
49 return tab.GetCookieByName('__domperf_finished') == '1' | 50 return tab.GetCookieByName('__domperf_finished') == '1' |
50 util.WaitFor(_IsDone, 600) | 51 util.WaitFor(_IsDone, 600) |
51 | 52 |
52 data = json.loads(tab.EvaluateJavaScript('__domperf_result')) | 53 data = json.loads(tab.EvaluateJavaScript('__domperf_result')) |
53 for suite in data['BenchmarkSuites']: | 54 for suite in data['BenchmarkSuites']: |
54 # Skip benchmarks that we didn't actually run this time around. | 55 # Skip benchmarks that we didn't actually run this time around. |
55 if len(suite['Benchmarks']) or suite['score']: | 56 if len(suite['Benchmarks']) or suite['score']: |
56 results.AddValue(scalar.ScalarValue( | 57 results.AddValue(scalar.ScalarValue( |
57 results.current_page, '%s.%s' % (suite['name'], SCORE_TRACE_NAME), | 58 results.current_page, '%s.%s' % (suite['name'], SCORE_TRACE_NAME), |
58 SCORE_UNIT, suite['score'], important=False)) | 59 SCORE_UNIT, suite['score'], important=False, |
| 60 improvement_direction=improvement_direction.UP)) |
59 finally: | 61 finally: |
60 tab.EvaluateJavaScript('document.cookie = "__domperf_finished=0"') | 62 tab.EvaluateJavaScript('document.cookie = "__domperf_finished=0"') |
61 | 63 |
def DidRunTest(self, browser, results):
    """Emit the overall benchmark total once every page has run.

    Merges the per-page score values collected during the run and reports
    their geometric mean as a single 'Total' summary value.

    Args:
      browser: The browser the pages were run in (unused here).
      results: The PageTestResults accumulated across all pages; the
          summary value is appended to it.
    """
    # Group values from different pages by name suffix so that each
    # suite's SCORE_TRACE_NAME entries collapse into one merged value.
    merged = merge_values.MergeLikeValuesFromDifferentPages(
        results.all_page_specific_values,
        group_by_name_suffix=True)
    # Only the overall score trace contributes to the combined total.
    score_value = [v for v in merged if v.name == SCORE_TRACE_NAME][0]
    geometric_total = _GeometricMean(score_value.values)
    # Higher scores are better for this benchmark, hence direction UP.
    results.AddSummaryValue(
        scalar.ScalarValue(None, 'Total.' + SCORE_TRACE_NAME, SCORE_UNIT,
                           geometric_total,
                           improvement_direction=improvement_direction.UP))
72 | 75 |
73 | 76 |
74 @benchmark.Disabled('android', 'linux') | 77 @benchmark.Disabled('android', 'linux') |
75 class DomPerf(benchmark.Benchmark): | 78 class DomPerf(benchmark.Benchmark): |
76 """A suite of JavaScript benchmarks for exercising the browser's DOM. | 79 """A suite of JavaScript benchmarks for exercising the browser's DOM. |
77 | 80 |
78 The final score is computed as the geometric mean of the individual results. | 81 The final score is computed as the geometric mean of the individual results. |
79 Scores are not comparable across benchmark suite versions and higher scores | 82 Scores are not comparable across benchmark suite versions and higher scores |
80 means better performance: Bigger is better!""" | 83 means better performance: Bigger is better!""" |
81 test = _DomPerfMeasurement | 84 test = _DomPerfMeasurement |
(...skipping 10 matching lines...) Expand all Loading... |
92 'Events', | 95 'Events', |
93 'Get+Elements', | 96 'Get+Elements', |
94 'GridSort', | 97 'GridSort', |
95 'Template' | 98 'Template' |
96 ] | 99 ] |
97 ps = page_set.PageSet(file_path=dom_perf_dir) | 100 ps = page_set.PageSet(file_path=dom_perf_dir) |
98 for param in run_params: | 101 for param in run_params: |
99 ps.AddUserStory(page_module.Page( | 102 ps.AddUserStory(page_module.Page( |
100 'file://run.html?reportInJS=1&run=%s' % param, ps, ps.base_dir)) | 103 'file://run.html?reportInJS=1&run=%s' % param, ps, ps.base_dir)) |
101 return ps | 104 return ps |
OLD | NEW |