Chromium Code Reviews

Unified Diff: mojo/devtools/common/mojo_benchmark

Issue 1392173006: Make mojo_benchmark understand the output it is producing. (Closed) Base URL: https://github.com/domokit/mojo.git@master
Patch Set: Address Trung's comments. Created 5 years, 2 months ago
--- a/mojo/devtools/common/mojo_benchmark
+++ b/mojo/devtools/common/mojo_benchmark
 #!/usr/bin/env python
 # Copyright 2015 The Chromium Authors. All rights reserved.
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.

 """Runner for Mojo application benchmarks."""

 import argparse
 import logging
 import sys
 import time
 import os.path
+import re

 from devtoolslib import shell_arguments
 from devtoolslib import shell_config


 _DESCRIPTION = """Runner for Mojo application benchmarks.

 |benchmark_list_file| has to be a valid Python program that sets a |benchmarks|
 global variable, containing entries of the following form:

(...skipping 30 matching lines...)
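
For orientation, here is a rough sketch of what a |benchmark_list_file| entry might look like. The authoritative format lives in the skipped portion of _DESCRIPTION above; the keys shown ('name', 'app', 'duration', 'measurements', 'shell-args') are the ones this script reads, while the app URL and the measurement string are hypothetical placeholders:

    benchmarks = [
        {
            'name': 'example app startup',
            # Hypothetical app URL, for illustration only.
            'app': 'https://example.org/example_app.mojo',
            # How long the app runs under the benchmark harness, in seconds.
            'duration': 10,
            'shell-args': [],
            # Each measurement string is passed verbatim to benchmark.mojo; the
            # exact spec format is defined in the skipped part of _DESCRIPTION.
            'measurements': [
                'time_until/example_category/example_event',
            ],
        },
    ]
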
 _COLD_START_SHELL_ARGS = [
     '--args-for=%s %s' % (_CACHE_SERVICE_URL, '--clear'),
     '--args-for=%s %s' % (_NETWORK_SERVICE_URL, '--clear'),
 ]

 # Additional time in seconds allocated per shell run to accommodate start-up.
 # The shell should terminate before hitting this timeout; it is an error if it
 # doesn't.
 _EXTRA_TIMEOUT = 20

+_MEASUREMENT_RESULT_FORMAT = r"""
+^             # Beginning of the line.
+measurement:  # Hard-coded tag.
+\s+(\S+)      # Match measurement name.
+\s+(\S+)      # Match measurement result.
+$             # End of the line.
+"""
+
+_MEASUREMENT_REGEX = re.compile(_MEASUREMENT_RESULT_FORMAT, re.VERBOSE)
+

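Since the pattern is compiled with re.VERBOSE, the whitespace and trailing comments inside _MEASUREMENT_RESULT_FORMAT are ignored, so the regex matches lines of the form "measurement: <name> <value>". A minimal sketch of the intended behavior (the sample line is invented):

    import re

    _MEASUREMENT_REGEX = re.compile(r"""
    ^             # Beginning of the line.
    measurement:  # Hard-coded tag.
    \s+(\S+)      # Match measurement name.
    \s+(\S+)      # Match measurement result.
    $             # End of the line.
    """, re.VERBOSE)

    # Both the name and the raw result are captured as strings.
    print(_MEASUREMENT_REGEX.match('measurement: time_to_run 12.5').groups())
    # ('time_to_run', '12.5')
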
 def _generate_benchmark_variants(benchmark_spec):
   """Generates benchmark specifications for individual variants of the given
   benchmark: cold start and warm start.

   Returns:
     A list of benchmark specs corresponding to individual variants of the given
     benchmark.
   """
   variants = []
(...skipping 10 matching lines...)
       'name': benchmark_spec['name'] + ' (warm start)',
       'app': benchmark_spec['app'],
       'duration': benchmark_spec['duration'],
       'measurements': benchmark_spec['measurements'],
       'shell-args': benchmark_spec.get('shell-args', [])})
   return variants

 def _run_benchmark(shell, shell_args, name, app, duration_seconds, measurements,
                    verbose, android, save_traces):
-  """Runs `benchmark.mojo` in shell with correct arguments, parses and
-  presents the benchmark results.
+  """Runs the given benchmark by running `benchmark.mojo` in mojo shell with
+  appropriate arguments and returns the produced output.
+
+  Returns:
+    A tuple of (succeeded, error_msg, output).
   """
   timeout = duration_seconds + _EXTRA_TIMEOUT
   benchmark_args = []
   benchmark_args.append('--app=' + app)
   benchmark_args.append('--duration=' + str(duration_seconds))

   output_file = None
   device_output_file = None
   if save_traces:
     output_file = 'benchmark-%s-%s.trace' % (name.replace(' ', '_'),
                                              time.strftime('%Y%m%d%H%M%S'))
     if android:
       device_output_file = os.path.join(shell.get_tmp_dir_path(), output_file)
       benchmark_args.append('--trace-output=' + device_output_file)
     else:
       benchmark_args.append('--trace-output=' + output_file)

   for measurement in measurements:
     benchmark_args.append(measurement)

   shell_args = list(shell_args)
   shell_args.append(_BENCHMARK_APP)
   shell_args.append('--force-offline-by-default')
   shell_args.append('--args-for=%s %s' % (_BENCHMARK_APP,
                                           ' '.join(benchmark_args)))

   if verbose:
     print 'shell arguments: ' + str(shell_args)
-  print '[ %s ]' % name
   return_code, output, did_time_out = shell.run_and_get_output(
       shell_args, timeout=timeout)
-  output_lines = [line.strip() for line in output.split('\n')]

-  if return_code or did_time_out or 'benchmark succeeded' not in output_lines:
-    print 'timed out' if did_time_out else 'failed'
-    if return_code:
-      print 'Return code: ' + str(return_code)
-    print 'Output: '
-    print output
-    print '-' * 72
-    return False
+  if did_time_out:
+    return False, 'timed out', output
+  if return_code:
+    return False, 'return code: ' + str(return_code), output

-  # Echo measurement results.
-  for line in output_lines:
-    if line.strip().startswith('measurement:') or 'WARNING' in line:
-      print line
-
+  # Pull the trace file even if some measurements are missing, as it can be
+  # useful in debugging.
   if device_output_file:
     shell.pull_file(device_output_file, output_file, remove_original=True)
-  return True
+
+  return True, None, output
+
+def _parse_measurement_results(output):
+  """Parses the measurement results present in the benchmark output and returns
+  the dictionary of correctly recognized and parsed results.
+  """
+  measurement_results = {}
+  output_lines = [line.strip() for line in output.split('\n')]
+  for line in output_lines:
+    match = re.match(_MEASUREMENT_REGEX, line)
+    if match:
+      measurement_name = match.group(1)
+      measurement_result = match.group(2)
+      try:
+        measurement_results[measurement_name] = float(measurement_result)
+      except ValueError:
+        pass
+  return measurement_results


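Together with _MEASUREMENT_REGEX, this turns the shell's raw output into a name-to-float dictionary, silently dropping results that do not parse as numbers. A quick illustration with invented output:

    sample_output = '\n'.join([
        'some unrelated log line',
        'measurement: time_to_run 12.5',
        'measurement: time_to_first_frame not_a_number',  # fails float(), dropped
    ])
    print(_parse_measurement_results(sample_output))
    # {'time_to_run': 12.5}
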
 def main():
   parser = argparse.ArgumentParser(
       formatter_class=argparse.RawDescriptionHelpFormatter,
       description=_DESCRIPTION)
   parser.add_argument('benchmark_list_file', type=file,
                       help='a file listing benchmarks to run')
   parser.add_argument('--save-traces', action='store_true',
                       help='save the traces produced by benchmarks to disk')

   # Common shell configuration arguments.
   shell_config.add_shell_arguments(parser)
   script_args = parser.parse_args()
   config = shell_config.get_shell_config(script_args)

   try:
     shell, common_shell_args = shell_arguments.get_shell(config, [])
   except shell_arguments.ShellConfigurationException as e:
     print e
     return 1

   target_os = 'android' if script_args.android else 'linux'
   benchmark_list_params = {"target_os": target_os}
   exec script_args.benchmark_list_file in benchmark_list_params

-  succeeded = True
+  exit_code = 0
   for benchmark_spec in benchmark_list_params['benchmarks']:
     for variant_spec in _generate_benchmark_variants(benchmark_spec):
       name = variant_spec['name']
       app = variant_spec['app']
       duration = variant_spec['duration']
       shell_args = variant_spec.get('shell-args', []) + common_shell_args
       measurements = variant_spec['measurements']
-      _run_benchmark(shell, shell_args, name, app, duration, measurements,
-                     script_args.verbose, script_args.android,
-                     script_args.save_traces)
+      benchmark_succeeded, benchmark_error, output = _run_benchmark(
+          shell, shell_args, name, app, duration, measurements,
+          script_args.verbose, script_args.android,
+          script_args.save_traces)

-  return 0 if succeeded else 1
+      print '[ %s ]' % name
+
+      some_measurements_failed = False
+      if benchmark_succeeded:
+        measurement_results = _parse_measurement_results(output)
+        # Iterate over the list of specs, not the dictionary, to detect missing
+        # results and preserve the required order.
+        for measurement_spec in measurements:
+          if measurement_spec in measurement_results:
+            print '%s %s' % (measurement_spec,
+                             measurement_results[measurement_spec])
+          else:
+            print '%s ?' % measurement_spec
+            some_measurements_failed = True
+
+      if not benchmark_succeeded or some_measurements_failed:
+        if not benchmark_succeeded:
+          print 'benchmark failed: ' + benchmark_error
+        if some_measurements_failed:
+          print 'some measurements failed'
+        print 'output: '
+        print '-' * 72
+        print output
+        print '-' * 72
+        exit_code = 1
+
+  return exit_code

 if __name__ == '__main__':
   sys.exit(main())
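
With this change a run prints one block per benchmark variant: the variant name in brackets, then one line per requested measurement, with '?' standing in for a missing result, and a non-zero exit code if anything failed. The invocation, names, and values below are invented for illustration; only --save-traces is visible in this diff, and the remaining flags come from shell_config.add_shell_arguments:

    $ mojo_benchmark my_benchmark_list --save-traces
    [ example app startup (cold start) ]
    time_until/example_category/example_event 12.5
    [ example app startup (warm start) ]
    time_until/example_category/example_event ?
    some measurements failed
    output:
    ------------------------------------------------------------------------
    (full shell output echoed here for debugging)
    ------------------------------------------------------------------------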