Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(62)

Side by Side Diff: tools/android/loading/sandwich_metrics.py

Issue 1925803003: sandwich: Make speed-index and memory measurement optional from run-all (Closed) Base URL: https://chromium.googlesource.com/chromium/src.git@master
Patch Set: Created 4 years, 7 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch
OLDNEW
1 # Copyright 2016 The Chromium Authors. All rights reserved. 1 # Copyright 2016 The Chromium Authors. All rights reserved.
2 # Use of this source code is governed by a BSD-style license that can be 2 # Use of this source code is governed by a BSD-style license that can be
3 # found in the LICENSE file. 3 # found in the LICENSE file.
4 4
5 """Pull a sandwich run's output directory's metrics from traces into a CSV. 5 """Pull a sandwich run's output directory's metrics from traces into a CSV.
6 6
7 python pull_sandwich_metrics.py -h 7 python pull_sandwich_metrics.py -h
8 """ 8 """
9 9
10 import collections 10 import collections
(...skipping 25 matching lines...) Expand all
36 'total_load', 36 'total_load',
37 'js_onload_event', 37 'js_onload_event',
38 'browser_malloc_avg', 38 'browser_malloc_avg',
39 'browser_malloc_max', 39 'browser_malloc_max',
40 'speed_index', 40 'speed_index',
41 'net_emul.name', # Should be in emulation.NETWORK_CONDITIONS.keys() 41 'net_emul.name', # Should be in emulation.NETWORK_CONDITIONS.keys()
42 'net_emul.download', 42 'net_emul.download',
43 'net_emul.upload', 43 'net_emul.upload',
44 'net_emul.latency'] 44 'net_emul.latency']
45 45
46 _UNAVAILABLE_CSV_VALUE = 'unavailable'
47
46 _TRACKED_EVENT_NAMES = set(['requestStart', 'loadEventStart', 'loadEventEnd']) 48 _TRACKED_EVENT_NAMES = set(['requestStart', 'loadEventStart', 'loadEventEnd'])
47 49
48 # Points of a completeness record. 50 # Points of a completeness record.
49 # 51 #
50 # Members: 52 # Members:
51 # |time| is in milliseconds, 53 # |time| is in milliseconds,
52 # |frame_completeness| value representing how complete the frame is at a given 54 # |frame_completeness| value representing how complete the frame is at a given
53 # |time|. Caution: this completeness might be negative. 55 # |time|. Caution: this completeness might be negative.
54 CompletenessPoint = collections.namedtuple('CompletenessPoint', 56 CompletenessPoint = collections.namedtuple('CompletenessPoint',
55 ('time', 'frame_completeness')) 57 ('time', 'frame_completeness'))
(...skipping 18 matching lines...) Expand all
74 76
75 def _GetBrowserDumpEvents(tracing_track): 77 def _GetBrowserDumpEvents(tracing_track):
76 """Get the browser memory dump events from a tracing track. 78 """Get the browser memory dump events from a tracing track.
77 79
78 Args: 80 Args:
79 tracing_track: The tracing.TracingTrack. 81 tracing_track: The tracing.TracingTrack.
80 82
81 Returns: 83 Returns:
82 List of memory dump events. 84 List of memory dump events.
83 """ 85 """
86 assert sandwich_runner.MEMORY_DUMP_CATEGORY in tracing_track.Categories()
84 browser_pid = _GetBrowserPID(tracing_track) 87 browser_pid = _GetBrowserPID(tracing_track)
85 browser_dumps_events = [] 88 browser_dumps_events = []
86 for event in tracing_track.GetEvents(): 89 for event in tracing_track.GetEvents():
87 if event.category != 'disabled-by-default-memory-infra': 90 if event.category != 'disabled-by-default-memory-infra':
88 continue 91 continue
89 if event.type != 'v' or event.name != 'periodic_interval': 92 if event.type != 'v' or event.name != 'periodic_interval':
90 continue 93 continue
91 # Ignore dump events for processes other than the browser process 94 # Ignore dump events for processes other than the browser process
92 if event.pid != browser_pid: 95 if event.pid != browser_pid:
93 continue 96 continue
(...skipping 30 matching lines...) Expand all
124 if 'frame' in event.args and event.args['frame'] != main_frame: 127 if 'frame' in event.args and event.args['frame'] != main_frame:
125 continue 128 continue
126 if event_name in _TRACKED_EVENT_NAMES and event_name not in tracked_events: 129 if event_name in _TRACKED_EVENT_NAMES and event_name not in tracked_events:
127 logging.info('found url\'s event \'%s\'' % event_name) 130 logging.info('found url\'s event \'%s\'' % event_name)
128 tracked_events[event_name] = event 131 tracked_events[event_name] = event
129 assert len(tracked_events) == len(_TRACKED_EVENT_NAMES) 132 assert len(tracked_events) == len(_TRACKED_EVENT_NAMES)
130 return tracked_events 133 return tracked_events
131 134
132 135
133 def _ExtractMetricsFromLoadingTrace(loading_trace): 136 def _ExtractMetricsFromLoadingTrace(loading_trace):
134 """Pulls all the metrics from a given trace. 137 """Extract the metrics from a given trace.
pasko 2016/04/28 15:04:59 nit: I don't see a problem with the previous comment […]
gabadie 2016/04/28 15:56:22 I see your point, but here there was an inconsiste
135 138
136 Args: 139 Args:
137 loading_trace: loading_trace_module.LoadingTrace. 140 loading_trace: loading_trace_module.LoadingTrace.
138 141
139 Returns: 142 Returns:
140 Dictionary with all trace extracted fields set. 143 Dictionary with all trace extracted fields set.
141 """ 144 """
142 assert all(
143 cat in loading_trace.tracing_track.Categories()
144 for cat in sandwich_runner.ADDITIONAL_CATEGORIES), (
145 'This trace was not generated with the required set of categories '
146 'to be processed by this script.')
147 browser_dump_events = _GetBrowserDumpEvents(loading_trace.tracing_track)
148 web_page_tracked_events = _GetWebPageTrackedEvents( 145 web_page_tracked_events = _GetWebPageTrackedEvents(
149 loading_trace.tracing_track) 146 loading_trace.tracing_track)
147 return {
148 'total_load': (web_page_tracked_events['loadEventEnd'].start_msec -
149 web_page_tracked_events['requestStart'].start_msec),
150 'js_onload_event': (web_page_tracked_events['loadEventEnd'].start_msec -
151 web_page_tracked_events['loadEventStart'].start_msec)
152 }
150 153
154
155 def _ExtractMemoryMetricsFromLoadingTrace(loading_trace):
pasko 2016/04/28 15:04:59 Having two functions with very similar long names […]
gabadie 2016/04/28 15:56:22 Used _ExtractDefaultMetrics and _ExtractMemoryMetrics […]
156 """Extract the memory metrics from a given trace.
157
158 Args:
159 loading_trace: loading_trace_module.LoadingTrace.
160
161 Returns:
162 Dictionary with all trace extracted fields set.
163 """
164 if (sandwich_runner.MEMORY_DUMP_CATEGORY not in
165 loading_trace.tracing_track.Categories()):
166 return {
167 'browser_malloc_avg': _UNAVAILABLE_CSV_VALUE,
168 'browser_malloc_max': _UNAVAILABLE_CSV_VALUE
169 }
170 browser_dump_events = _GetBrowserDumpEvents(loading_trace.tracing_track)
151 browser_malloc_sum = 0 171 browser_malloc_sum = 0
152 browser_malloc_max = 0 172 browser_malloc_max = 0
153 for dump_event in browser_dump_events: 173 for dump_event in browser_dump_events:
154 attr = dump_event.args['dumps']['allocators']['malloc']['attrs']['size'] 174 attr = dump_event.args['dumps']['allocators']['malloc']['attrs']['size']
155 assert attr['units'] == 'bytes' 175 assert attr['units'] == 'bytes'
156 size = int(attr['value'], 16) 176 size = int(attr['value'], 16)
157 browser_malloc_sum += size 177 browser_malloc_sum += size
158 browser_malloc_max = max(browser_malloc_max, size) 178 browser_malloc_max = max(browser_malloc_max, size)
159
160 return { 179 return {
161 'total_load': (web_page_tracked_events['loadEventEnd'].start_msec -
162 web_page_tracked_events['requestStart'].start_msec),
163 'js_onload_event': (web_page_tracked_events['loadEventEnd'].start_msec -
164 web_page_tracked_events['loadEventStart'].start_msec),
165 'browser_malloc_avg': browser_malloc_sum / float(len(browser_dump_events)), 180 'browser_malloc_avg': browser_malloc_sum / float(len(browser_dump_events)),
166 'browser_malloc_max': browser_malloc_max 181 'browser_malloc_max': browser_malloc_max
167 } 182 }
168 183
169 184
170 def _ExtractCompletenessRecordFromVideo(video_path): 185 def _ExtractCompletenessRecordFromVideo(video_path):
171 """Extracts the completeness record from a video. 186 """Extracts the completeness record from a video.
172 187
173 The video must start with a filled rectangle of orange (RGB: 222, 100, 13), to 188 The video must start with a filled rectangle of orange (RGB: 222, 100, 13), to
174 give the view-port size/location from where to compute the completeness. 189 give the view-port size/location from where to compute the completeness.
(...skipping 58 matching lines...) Expand 10 before | Expand all | Expand 10 after
233 run_directory_path: Path of the run directory. 248 run_directory_path: Path of the run directory.
234 249
235 Returns: 250 Returns:
236 Dictionary of extracted metrics. 251 Dictionary of extracted metrics.
237 """ 252 """
238 trace_path = os.path.join(run_directory_path, 'trace.json') 253 trace_path = os.path.join(run_directory_path, 'trace.json')
239 logging.info('processing trace \'%s\'' % trace_path) 254 logging.info('processing trace \'%s\'' % trace_path)
240 loading_trace = loading_trace_module.LoadingTrace.FromJsonFile(trace_path) 255 loading_trace = loading_trace_module.LoadingTrace.FromJsonFile(trace_path)
241 run_metrics = {'url': loading_trace.url} 256 run_metrics = {'url': loading_trace.url}
242 run_metrics.update(_ExtractMetricsFromLoadingTrace(loading_trace)) 257 run_metrics.update(_ExtractMetricsFromLoadingTrace(loading_trace))
258 run_metrics.update(_ExtractMemoryMetricsFromLoadingTrace(loading_trace))
243 video_path = os.path.join(run_directory_path, 'video.mp4') 259 video_path = os.path.join(run_directory_path, 'video.mp4')
244 if os.path.isfile(video_path): 260 if os.path.isfile(video_path):
245 logging.info('processing speed-index video \'%s\'' % video_path) 261 logging.info('processing speed-index video \'%s\'' % video_path)
246 completeness_record = _ExtractCompletenessRecordFromVideo(video_path) 262 completeness_record = _ExtractCompletenessRecordFromVideo(video_path)
247 run_metrics['speed_index'] = ComputeSpeedIndex(completeness_record) 263 run_metrics['speed_index'] = ComputeSpeedIndex(completeness_record)
248 else: 264 else:
249 run_metrics['speed_index'] = 'disabled' 265 run_metrics['speed_index'] = _UNAVAILABLE_CSV_VALUE
250 for key, value in loading_trace.metadata['network_emulation'].iteritems(): 266 for key, value in loading_trace.metadata['network_emulation'].iteritems():
251 run_metrics['net_emul.' + key] = value 267 run_metrics['net_emul.' + key] = value
252 return run_metrics 268 return run_metrics
253 269
254 270
255 def ExtractMetricsFromRunnerOutputDirectory(output_directory_path): 271 def ExtractMetricsFromRunnerOutputDirectory(output_directory_path):
256 """Extracts all the metrics from all the traces of a sandwich runner output 272 """Extracts all the metrics from all the traces of a sandwich runner output
257 directory. 273 directory.
258 274
259 Args: 275 Args:
(...skipping 13 matching lines...) Expand all
273 except ValueError: 289 except ValueError:
274 continue 290 continue
275 run_directory_path = os.path.join(output_directory_path, node_name) 291 run_directory_path = os.path.join(output_directory_path, node_name)
276 run_metrics = _ExtractMetricsFromRunDirectory(run_directory_path) 292 run_metrics = _ExtractMetricsFromRunDirectory(run_directory_path)
277 run_metrics['repeat_id'] = repeat_id 293 run_metrics['repeat_id'] = repeat_id
278 assert set(run_metrics.keys()) == set(CSV_FIELD_NAMES) 294 assert set(run_metrics.keys()) == set(CSV_FIELD_NAMES)
279 metrics.append(run_metrics) 295 metrics.append(run_metrics)
280 assert len(metrics) > 0, ('Looks like \'{}\' was not a sandwich runner ' + 296 assert len(metrics) > 0, ('Looks like \'{}\' was not a sandwich runner ' +
281 'output directory.').format(output_directory_path) 297 'output directory.').format(output_directory_path)
282 return metrics 298 return metrics
OLDNEW

Powered by Google App Engine
This is Rietveld 408576698