Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(17)

Side by Side Diff: mojo/devtools/common/mojo_benchmark

Issue 1320403003: Teach mojo_benchmark to control caching conditions. (Closed) Base URL: git@github.com:domokit/mojo.git@master
Patch Set: Created 5 years, 3 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch
« no previous file with comments | « no previous file | no next file » | no next file with comments »
Toggle Intra-line Diffs ('i') | Expand Comments ('e') | Collapse Comments ('c') | Show Comments Hide Comments ('s')
OLDNEW
1 #!/usr/bin/env python 1 #!/usr/bin/env python
2 # Copyright 2015 The Chromium Authors. All rights reserved. 2 # Copyright 2015 The Chromium Authors. All rights reserved.
3 # Use of this source code is governed by a BSD-style license that can be 3 # Use of this source code is governed by a BSD-style license that can be
4 # found in the LICENSE file. 4 # found in the LICENSE file.
5 5
6 """Runner for Mojo application benchmarks.""" 6 """Runner for Mojo application benchmarks."""
7 7
8 import argparse 8 import argparse
9 import logging 9 import logging
10 import sys 10 import sys
(...skipping 25 matching lines...) Expand all
36 - 'avg_duration' - average duration of the targeted event 36 - 'avg_duration' - average duration of the targeted event
37 37
38 |benchmark_list_file| may reference the |target_os| global that will be any of 38 |benchmark_list_file| may reference the |target_os| global that will be any of
39 ['android', 'linux'], indicating the system on which the benchmarks are to be 39 ['android', 'linux'], indicating the system on which the benchmarks are to be
40 run. 40 run.
41 """ 41 """
42 42
43 _logger = logging.getLogger() 43 _logger = logging.getLogger()
44 44
45 _BENCHMARK_APP = 'https://core.mojoapps.io/benchmark.mojo' 45 _BENCHMARK_APP = 'https://core.mojoapps.io/benchmark.mojo'
46 _LEGACY_CACHE_SERVICE_URL = 'mojo:url_response_disk_cache'
47 _CACHE_SERVICE_URL = 'https://core.mojoapps.io/url_response_disk_cache.mojo'
qsr (NOT THE RIGHT qsr) 2015/09/07 11:29:58 Why both URLs?
ppi 2015/09/07 11:32:58 So that this doesn't break when we switch the call
46 48
47 # Additional time in seconds allocated per shell run to accommodate start-up. 49 # Additional time in seconds allocated per shell run to accommodate start-up.
48 # The shell should terminate before hitting this time out, it is an error if it 50 # The shell should terminate before hitting this time out, it is an error if it
49 # doesn't. 51 # doesn't.
50 _EXTRA_TIMEOUT = 20 52 _EXTRA_TIMEOUT = 20
51 53
52 54
53 def _run_benchmark(shell, shell_args, name, app, duration_seconds, measurements, 55 def _run_benchmark(shell, shell_args, name, app, duration_seconds, measurements,
54 verbose): 56 cold_start, verbose):
55 """Runs `benchmark.mojo` in a shell with correct arguments, parses and 57 """Runs `benchmark.mojo` in shell with correct arguments, parses and
56 presents the benchmark results. 58 presents the benchmark results.
57 """ 59 """
58 timeout = duration_seconds + _EXTRA_TIMEOUT 60 timeout = duration_seconds + _EXTRA_TIMEOUT
59 benchmark_args = [] 61 benchmark_args = []
60 benchmark_args.append('--app=' + app) 62 benchmark_args.append('--app=' + app)
61 benchmark_args.append('--duration=' + str(duration_seconds)) 63 benchmark_args.append('--duration=' + str(duration_seconds))
62 for measurement in measurements: 64 for measurement in measurements:
63 benchmark_args.append(measurement) 65 benchmark_args.append(measurement)
66
67 shell_args = list(shell_args)
64 shell_args.append(_BENCHMARK_APP) 68 shell_args.append(_BENCHMARK_APP)
65 shell_args.append('--args-for=%s %s' % (_BENCHMARK_APP, 69 shell_args.append('--args-for=%s %s' % (_BENCHMARK_APP,
66 ' '.join(benchmark_args))) 70 ' '.join(benchmark_args)))
71
72 if cold_start:
73 shell_args.append('--args-for=%s %s' % (_LEGACY_CACHE_SERVICE_URL,
74 '--clear'))
75 shell_args.append('--args-for=%s %s' % (_CACHE_SERVICE_URL, '--clear'))
76
67 if verbose: 77 if verbose:
68 print 'shell arguments: ' + str(shell_args) 78 print 'shell arguments: ' + str(shell_args)
69 print '[' + name + ']' 79 print '[ %s ] %s' % (name, 'cold start' if cold_start else 'warm start')
70 return_code, output, did_time_out = shell.run_and_get_output( 80 return_code, output, did_time_out = shell.run_and_get_output(
71 shell_args, timeout=timeout) 81 shell_args, timeout=timeout)
72 output_lines = [line.strip() for line in output.split('\n')] 82 output_lines = [line.strip() for line in output.split('\n')]
73 83
74 if return_code or did_time_out or 'benchmark succeeded' not in output_lines: 84 if return_code or did_time_out or 'benchmark succeeded' not in output_lines:
75 print 'timed out' if did_time_out else 'failed' 85 print 'timed out' if did_time_out else 'failed'
76 if return_code: 86 if return_code:
77 print 'Return code: ' + str(return_code) 87 print 'Return code: ' + str(return_code)
78 print 'Output: ' 88 print 'Output: '
79 print output 89 print output
(...skipping 31 matching lines...) Expand 10 before | Expand all | Expand 10 after
111 benchmark_list = benchmark_list_params['benchmarks'] 121 benchmark_list = benchmark_list_params['benchmarks']
112 122
113 succeeded = True 123 succeeded = True
114 for benchmark_spec in benchmark_list: 124 for benchmark_spec in benchmark_list:
115 name = benchmark_spec['name'] 125 name = benchmark_spec['name']
116 app = benchmark_spec['app'] 126 app = benchmark_spec['app']
117 duration = benchmark_spec['duration'] 127 duration = benchmark_spec['duration']
118 shell_args = benchmark_spec.get('shell-args', []) + common_shell_args 128 shell_args = benchmark_spec.get('shell-args', []) + common_shell_args
119 measurements = benchmark_spec['measurements'] 129 measurements = benchmark_spec['measurements']
120 _run_benchmark(shell, shell_args, name, app, duration, measurements, 130 _run_benchmark(shell, shell_args, name, app, duration, measurements,
121 script_args.verbose) 131 cold_start=True, verbose=script_args.verbose)
132 _run_benchmark(shell, shell_args, name, app, duration, measurements,
133 cold_start=False, verbose=script_args.verbose)
122 134
123 return 0 if succeeded else 1 135 return 0 if succeeded else 1
124 136
# Script entry point: exit with main()'s status code (0 on success, 1 if any
# benchmark failed).
if __name__ == '__main__':
  sys.exit(main())
OLDNEW
« no previous file with comments | « no previous file | no next file » | no next file with comments »

Powered by Google App Engine
This is Rietveld 408576698