Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(797)

Side by Side Diff: tools/perf/metrics/smoothness.py

Issue 23506030: telemetry: Add new metrics to smoothness benchmark. (Closed) Base URL: https://chromium.googlesource.com/chromium/src.git@master
Patch Set: Replaced 'score' with inverse RMS frame time. Created 7 years, 3 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch
« no previous file with comments | « tools/perf/metrics/gpu_rendering_stats.py ('k') | tools/perf/metrics/smoothness_unittest.py » ('j') | no next file with comments »
Toggle Intra-line Diffs ('i') | Expand Comments ('e') | Collapse Comments ('c') | Show Comments Hide Comments ('s')
OLDNEW
1 # Copyright 2013 The Chromium Authors. All rights reserved. 1 # Copyright 2013 The Chromium Authors. All rights reserved.
2 # Use of this source code is governed by a BSD-style license that can be 2 # Use of this source code is governed by a BSD-style license that can be
3 # found in the LICENSE file. 3 # found in the LICENSE file.
4 import os 4 import os
5 5
6 from telemetry.core import util 6 from telemetry.core import util
7 from metrics import discrepancy
7 8
8 TIMELINE_MARKER = 'smoothness_scroll' 9 TIMELINE_MARKER = 'smoothness_scroll'
9 10
10 class SmoothnessMetrics(object): 11 class SmoothnessMetrics(object):
11 def __init__(self, tab): 12 def __init__(self, tab):
12 self._tab = tab 13 self._tab = tab
13 with open( 14 with open(
14 os.path.join(os.path.dirname(__file__), 15 os.path.join(os.path.dirname(__file__),
15 'smoothness.js')) as f: 16 'smoothness.js')) as f:
16 js = f.read() 17 js = f.read()
(...skipping 64 matching lines...) Expand 10 before | Expand all | Expand 10 after
81 denominator_total = Total(denominator) 82 denominator_total = Total(denominator)
82 if denominator_total == 0: 83 if denominator_total == 0:
83 return 0 84 return 0
84 avg = numerator_total / denominator_total 85 avg = numerator_total / denominator_total
85 if scale: 86 if scale:
86 avg *= scale 87 avg *= scale
87 if precision: 88 if precision:
88 avg = round(avg, precision) 89 avg = round(avg, precision)
89 return avg 90 return avg
90 91
def DivideIfPossibleOrZero(numerator, denominator):
  """Returns numerator / denominator, or 0.0 when the denominator is falsy."""
  return numerator / denominator if denominator else 0.0
97
98 def GeneralizedMean(values, exponent):
99 ''' http://en.wikipedia.org/wiki/Generalized_mean '''
100 if not values:
101 return 0.0
102 sum_of_powers = 0.0
103 for v in values:
104 sum_of_powers += v ** exponent
105 return (sum_of_powers / len(values)) ** (1.0/exponent)
106
def CalcFirstPaintTimeResults(results, tab):
  """Adds a 'first_paint' result in ms (firstPaintTime - startLoadTime).

  Content shell does not support chrome.loadTimes(), so the value is
  recorded as 'unsupported' there.
  """
  if tab.browser.is_content_shell:
    results.Add('first_paint', 'ms', 'unsupported')
    return

  # Wait (up to 60s) until a requestAnimationFrame callback has fired
  # before sampling loadTimes().
  tab.ExecuteJavaScript("""
      window.__rafFired = false;
      window.webkitRequestAnimationFrame(function() {
        window.__rafFired = true;
      });
  """)
  util.WaitFor(lambda: tab.EvaluateJavaScript('window.__rafFired'), 60)

  first_paint_secs = tab.EvaluateJavaScript(
      'window.chrome.loadTimes().firstPaintTime - ' +
      'window.chrome.loadTimes().startLoadTime')

  results.Add('first_paint', 'ms', round(first_paint_secs * 1000, 1))
109 125
110 def CalcResults(benchmark_stats, results): 126 def CalcResults(benchmark_stats, results):
nduca 2013/09/05 17:57:41 you're saying screen frame throughout here but as
ernstm 2013/09/05 18:38:07 The name originates from the numFramesSentToScreen
111 s = benchmark_stats 127 s = benchmark_stats
112 128
129 frame_times = []
nduca 2013/09/05 17:57:41 screen_frame_xxx here and elsewhere
130 for i in xrange(1, len(s.screen_frame_timestamps)):
131 frame_times.append(
132 s.screen_frame_timestamps[i] - s.screen_frame_timestamps[i-1])
133
113 # Scroll Results 134 # Scroll Results
114 results.Add('mean_frame_time', 'ms', 135 results.Add('mean_frame_time', 'ms',
115 Average(s.total_time, s.screen_frame_count, 1000, 3)) 136 Average(s.total_time, s.screen_frame_count, 1000, 3))
137 results.Add('absolute_frame_discrepancy', '',
138 round(discrepancy.FrameDiscrepancy(s.screen_frame_timestamps,
139 True), 4))
140 results.Add('relative_frame_discrepancy', '',
nduca 2013/09/05 17:57:41 people are going to be very confused about the dif
ernstm 2013/09/05 18:38:07 I've put in both, so that we can evaluate which on
141 round(discrepancy.FrameDiscrepancy(s.screen_frame_timestamps,
142 False), 4))
143 results.Add('inverse_rms_frame_time', '',
144 round(DivideIfPossibleOrZero(1000.0,
nduca 2013/09/05 17:57:41 this is the key metric, no? can we make this less
ernstm 2013/09/05 18:38:07 The thing about this metric is that the exponent i
145 GeneralizedMean(frame_times, 2.0)),
146 2))
116 results.Add('dropped_percent', '%', 147 results.Add('dropped_percent', '%',
117 Average(s.dropped_frame_count, s.screen_frame_count, 148 Average(s.dropped_frame_count, s.screen_frame_count,
118 100, 1), 149 100, 1),
119 data_type='unimportant') 150 data_type='unimportant')
120 results.Add('percent_impl_scrolled', '%', 151 results.Add('percent_impl_scrolled', '%',
121 Average(s.impl_thread_scroll_count, 152 Average(s.impl_thread_scroll_count,
122 s.impl_thread_scroll_count + 153 s.impl_thread_scroll_count +
123 s.main_thread_scroll_count, 154 s.main_thread_scroll_count,
124 100, 1), 155 100, 1),
125 data_type='unimportant') 156 data_type='unimportant')
(...skipping 49 matching lines...) Expand 10 before | Expand all | Expand 10 after
175 Average(s.touch_ui_latency, s.touch_ui_count, 1000, 3), 206 Average(s.touch_ui_latency, s.touch_ui_count, 1000, 3),
176 data_type='unimportant') 207 data_type='unimportant')
177 results.Add('average_touch_acked_latency', 'ms', 208 results.Add('average_touch_acked_latency', 'ms',
178 Average(s.touch_acked_latency, s.touch_acked_count, 209 Average(s.touch_acked_latency, s.touch_acked_count,
179 1000, 3), 210 1000, 3),
180 data_type='unimportant') 211 data_type='unimportant')
181 results.Add('average_scroll_update_latency', 'ms', 212 results.Add('average_scroll_update_latency', 'ms',
182 Average(s.scroll_update_latency, s.scroll_update_count, 213 Average(s.scroll_update_latency, s.scroll_update_count,
183 1000, 3), 214 1000, 3),
184 data_type='unimportant') 215 data_type='unimportant')
OLDNEW
« no previous file with comments | « tools/perf/metrics/gpu_rendering_stats.py ('k') | tools/perf/metrics/smoothness_unittest.py » ('j') | no next file with comments »

Powered by Google App Engine
This is Rietveld 408576698