Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(34)

Side by Side Diff: tools/android/loading/sandwich_metrics.py

Issue 2009883002: sandwich: Make metrics extraction more customizable. (Closed) Base URL: https://chromium.googlesource.com/chromium/src.git@master
Patch Set: Created 4 years, 6 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch
OLDNEW
1 # Copyright 2016 The Chromium Authors. All rights reserved. 1 # Copyright 2016 The Chromium Authors. All rights reserved.
2 # Use of this source code is governed by a BSD-style license that can be 2 # Use of this source code is governed by a BSD-style license that can be
3 # found in the LICENSE file. 3 # found in the LICENSE file.
4 4
5 """Pull a sandwich run's output directory's metrics from traces into a CSV. 5 """Pull a sandwich run's output directory's metrics from traces into a CSV.
6 6
7 python pull_sandwich_metrics.py -h 7 python pull_sandwich_metrics.py -h
8 """ 8 """
9 9
10 import collections 10 import collections
(...skipping 14 matching lines...) Expand all
25 from telemetry.internal.image_processing import video 25 from telemetry.internal.image_processing import video
26 from telemetry.util import image_util 26 from telemetry.util import image_util
27 from telemetry.util import rgba_color 27 from telemetry.util import rgba_color
28 28
29 import loading_trace as loading_trace_module 29 import loading_trace as loading_trace_module
30 import sandwich_runner 30 import sandwich_runner
31 import sandwich_misc 31 import sandwich_misc
32 import tracing 32 import tracing
33 33
34 34
35 CSV_FIELD_NAMES = [ 35 COMMON_CSV_COLUMN_NAMES = [
36 'repeat_id',
37 'url',
38 'chromium_commit', 36 'chromium_commit',
39 'platform', 37 'platform',
40 'subresource_discoverer',
41 'subresource_count',
42 # The amount of subresources detected at SetupBenchmark step.
43 'subresource_count_theoretic',
44 # Amount of subresources for caching as suggested by the subresource
45 # discoverer.
46 'cached_subresource_count_theoretic',
47 'cached_subresource_count',
48 'first_layout', 38 'first_layout',
49 'first_contentful_paint', 39 'first_contentful_paint',
50 'total_load', 40 'total_load',
51 'js_onload_event', 41 'js_onload_event',
52 'browser_malloc_avg', 42 'browser_malloc_avg',
53 'browser_malloc_max', 43 'browser_malloc_max',
54 'speed_index', 44 'speed_index',
55 'net_emul.name', # Should be in emulation.NETWORK_CONDITIONS.keys() 45 'net_emul.name', # Should be in emulation.NETWORK_CONDITIONS.keys()
56 'net_emul.download', 46 'net_emul.download',
57 'net_emul.upload', 47 'net_emul.upload',
(...skipping 144 matching lines...) Expand 10 before | Expand all | Expand 10 after
202 assert attr['units'] == 'bytes' 192 assert attr['units'] == 'bytes'
203 size = int(attr['value'], 16) 193 size = int(attr['value'], 16)
204 browser_malloc_sum += size 194 browser_malloc_sum += size
205 browser_malloc_max = max(browser_malloc_max, size) 195 browser_malloc_max = max(browser_malloc_max, size)
206 return { 196 return {
207 'browser_malloc_avg': browser_malloc_sum / float(len(browser_dump_events)), 197 'browser_malloc_avg': browser_malloc_sum / float(len(browser_dump_events)),
208 'browser_malloc_max': browser_malloc_max 198 'browser_malloc_max': browser_malloc_max
209 } 199 }
210 200
211 201
def _ExtractBenchmarkStatistics(benchmark_setup, loading_trace):
  """Extracts some useful statistics from a benchmark run.

  Args:
    benchmark_setup: dict representing the benchmark setup JSON. The JSON
      format is according to:
      PrefetchBenchmarkBuilder.PopulateLoadBenchmark.SetupBenchmark.
    loading_trace: loading_trace_module.LoadingTrace.

  Returns:
    Dictionary with all extracted fields set.
  """
  # Requests observed in the trace, split by outcome.
  all_requests = sandwich_misc.ListUrlRequests(
      loading_trace, sandwich_misc.RequestOutcome.All)
  cached_requests = sandwich_misc.ListUrlRequests(
      loading_trace, sandwich_misc.RequestOutcome.ServedFromCache)
  return {
      'subresource_discoverer': benchmark_setup['subresource_discoverer'],
      'subresource_count': len(all_requests),
      # The amount of subresources detected at SetupBenchmark step.
      'subresource_count_theoretic': len(benchmark_setup['url_resources']),
      'cached_subresource_count': len(cached_requests),
      # Amount of subresources for caching as suggested by the subresource
      # discoverer.
      'cached_subresource_count_theoretic':
          len(benchmark_setup['cache_whitelist']),
  }
234
235
236 def _ExtractCompletenessRecordFromVideo(video_path): 202 def _ExtractCompletenessRecordFromVideo(video_path):
237 """Extracts the completeness record from a video. 203 """Extracts the completeness record from a video.
238 204
239 The video must start with a filled rectangle of orange (RGB: 222, 100, 13), to 205 The video must start with a filled rectangle of orange (RGB: 222, 100, 13), to
240 give the view-port size/location from where to compute the completeness. 206 give the view-port size/location from where to compute the completeness.
241 207
242 Args: 208 Args:
243 video_path: Path of the video to extract the completeness list from. 209 video_path: Path of the video to extract the completeness list from.
244 210
245 Returns: 211 Returns:
(...skipping 17 matching lines...) Expand all
263 if total_distance == 0: 229 if total_distance == 0:
264 if histogram.Distance(final_histogram) == 0: 230 if histogram.Distance(final_histogram) == 0:
265 return 1.0 231 return 1.0
266 else: 232 else:
267 return 0.0 233 return 0.0
268 return 1 - histogram.Distance(final_histogram) / total_distance 234 return 1 - histogram.Distance(final_histogram) / total_distance
269 235
270 return [(time, FrameProgress(hist)) for time, hist in histograms] 236 return [(time, FrameProgress(hist)) for time, hist in histograms]
271 237
272 238
273 def ComputeSpeedIndex(completeness_record): 239 def _ComputeSpeedIndex(completeness_record):
274 """Computes the speed-index from a completeness record. 240 """Computes the speed-index from a completeness record.
275 241
276 Args: 242 Args:
277 completeness_record: list(CompletenessPoint) 243 completeness_record: list(CompletenessPoint)
278 244
279 Returns: 245 Returns:
280 Speed-index value. 246 Speed-index value.
281 """ 247 """
282 speed_index = 0.0 248 speed_index = 0.0
283 last_time = completeness_record[0][0] 249 last_time = completeness_record[0][0]
284 last_completness = completeness_record[0][1] 250 last_completness = completeness_record[0][1]
285 for time, completeness in completeness_record: 251 for time, completeness in completeness_record:
286 if time < last_time: 252 if time < last_time:
287 raise ValueError('Completeness record must be sorted by timestamps.') 253 raise ValueError('Completeness record must be sorted by timestamps.')
288 elapsed = time - last_time 254 elapsed = time - last_time
289 speed_index += elapsed * (1.0 - last_completness) 255 speed_index += elapsed * (1.0 - last_completness)
290 last_time = time 256 last_time = time
291 last_completness = completeness 257 last_completness = completeness
292 return speed_index 258 return speed_index
293 259
294 260
def ExtractCommonMetricsFromRepeatDirectory(repeat_dir, trace):
  """Extracts all the metrics from traces and video of a sandwich run repeat
  directory.

  Args:
    repeat_dir: Path of the repeat directory within a run directory.
    trace: preloaded LoadingTrace in |repeat_dir|

  Contract:
    trace == LoadingTrace.FromJsonFile(
        os.path.join(repeat_dir, sandwich_runner.TRACE_FILENAME))

  Returns:
    Dictionary of extracted metrics.
  """
  platform_meta = trace.metadata['platform']
  metrics = {
      'chromium_commit': trace.metadata['chromium_commit'],
      'platform': (platform_meta['os'] + '-' +
                   platform_meta['product_model'])
  }
  metrics.update(_ExtractDefaultMetrics(trace))
  metrics.update(_ExtractMemoryMetrics(trace))
  video_path = os.path.join(repeat_dir, sandwich_runner.VIDEO_FILENAME)
  if not os.path.isfile(video_path):
    metrics['speed_index'] = _UNAVAILABLE_CSV_VALUE
  else:
    logging.info('processing speed-index video \'%s\'' % video_path)
    try:
      completeness = _ExtractCompletenessRecordFromVideo(video_path)
      metrics['speed_index'] = _ComputeSpeedIndex(completeness)
    except video.BoundingBoxNotFoundException:
      # Sometimes the bounding box for the web content area is not present.
      # Skip calculating Speed Index.
      metrics['speed_index'] = _FAILED_CSV_VALUE
  for key, value in trace.metadata['network_emulation'].iteritems():
    metrics['net_emul.' + key] = value
  # Every common column must be populated, no more and no less.
  assert set(metrics.keys()) == set(COMMON_CSV_COLUMN_NAMES)
  return metrics
336
337
def ExtractMetricsFromRunnerOutputDirectory(benchmark_setup_path,
                                            output_directory_path):
  """Extracts all the metrics from all the traces of a sandwich runner output
  directory.

  Args:
    benchmark_setup_path: Path of the JSON of the benchmark setup, or a falsy
      value to skip extracting the benchmark-specific statistics.
    output_directory_path: The sandwich runner's output directory to extract
      the metrics from.

  Returns:
    List of dictionaries, one per numbered repeat sub-directory.
  """
  benchmark_setup = None
  if benchmark_setup_path:
    # Use a context manager so the setup file is closed deterministically;
    # json.load(open(...)) leaked the handle until garbage collection.
    with open(benchmark_setup_path) as benchmark_setup_file:
      benchmark_setup = json.load(benchmark_setup_file)
  assert os.path.isdir(output_directory_path)
  metrics = []
  for node_name in os.listdir(output_directory_path):
    if not os.path.isdir(os.path.join(output_directory_path, node_name)):
      continue
    try:
      # Repeat directories are named after their integer repeat id; ignore
      # any other directory.
      repeat_id = int(node_name)
    except ValueError:
      continue
    run_directory_path = os.path.join(output_directory_path, node_name)
    run_metrics = _ExtractMetricsFromRunDirectory(
        benchmark_setup, run_directory_path)
    run_metrics['repeat_id'] = repeat_id
    # TODO(gabadie): Make common metrics extraction with benchmark type
    # specific CSV column.
    # assert set(run_metrics.keys()) == set(CSV_FIELD_NAMES)
    metrics.append(run_metrics)
  assert len(metrics) > 0, ('Looks like \'{}\' was not a sandwich runner ' +
                            'output directory.').format(output_directory_path)
  return metrics
OLDNEW
« no previous file with comments | « no previous file | tools/android/loading/sandwich_runner.py » ('j') | tools/android/loading/sandwich_runner.py » ('J')

Powered by Google App Engine
This is Rietveld 408576698