Chromium Code Reviews

Unified diff: mojo/devtools/common/mojo_benchmark

Issue 1427463003: Teach `mojo_benchmark` to upload results to perf dashboard. (Closed) Base URL: git@github.com:domokit/mojo.git@master
Patch Set: Created 5 years, 1 month ago
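
For context on the input format: `benchmark_list_file` is exec'd by the script (with `target_os` available in its namespace) and is expected to define a `benchmarks` list whose entries supply the fields read per variant in the loop below: `name`, `app`, `duration`, `shell-args` and `measurements`. A minimal sketch of such a file follows; the app URL and measurement specs are illustrative placeholders, and the exact variant expansion is done by `_generate_benchmark_variants`, which is outside the shown hunk.

# Hypothetical benchmark list file for mojo_benchmark.  Field names match
# what main() reads below; the app URL and the measurement specs are
# made-up placeholders, not real benchmarks.
benchmarks = [
    {
        'name': 'example app startup',
        'app': 'https://example.org/example_app.mojo',
        'duration': 10,
        'shell-args': [],
        'measurements': [
            'time_until/example_category/example_event',
        ],
    },
]
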
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch
« no previous file with comments | « mojo/devtools/common/docs/mojo_benchmark.md ('k') | no next file » | no next file with comments »
Toggle Intra-line Diffs ('i') | Expand Comments ('e') | Collapse Comments ('c') | Show Comments Hide Comments ('s')
--- a/mojo/devtools/common/mojo_benchmark
+++ b/mojo/devtools/common/mojo_benchmark
 #!/usr/bin/env python
 # Copyright 2015 The Chromium Authors. All rights reserved.
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.
 
 """Runner for Mojo application benchmarks."""
 
 import argparse
 import json
 import logging
(...skipping 154 matching lines...)
 
 
 def main():
   parser = argparse.ArgumentParser(
       formatter_class=argparse.RawDescriptionHelpFormatter,
       description=_DESCRIPTION)
   parser.add_argument('benchmark_list_file', type=file,
                       help='a file listing benchmarks to run')
   parser.add_argument('--save-traces', action='store_true',
                       help='save the traces produced by benchmarks to disk')
-  parser.add_argument('--chart-data-output-file', type=argparse.FileType('w'),
-                      help='file to write chart data for the performance '
-                      'dashboard to')
+  perf_dashboard.add_argparse_server_arguments(parser)
 
   # Common shell configuration arguments.
   shell_config.add_shell_arguments(parser)
   script_args = parser.parse_args()
   config = shell_config.get_shell_config(script_args)
 
   try:
     shell, common_shell_args = shell_arguments.get_shell(config, [])
   except shell_arguments.ShellConfigurationException as e:
     print e
     return 1
 
   target_os = 'android' if script_args.android else 'linux'
   benchmark_list_params = {"target_os": target_os}
   exec script_args.benchmark_list_file in benchmark_list_params
 
   exit_code = 0
   for benchmark_spec in benchmark_list_params['benchmarks']:
     benchmark_name = benchmark_spec['name']
-    chart_data_recorder = None
-    if script_args.chart_data_output_file:
-      chart_data_recorder = perf_dashboard.ChartDataRecorder(benchmark_name)
 
     for variant_spec in _generate_benchmark_variants(benchmark_spec):
       variant_name = variant_spec['variant_name']
       app = variant_spec['app']
       duration = variant_spec['duration']
       shell_args = variant_spec.get('shell-args', []) + common_shell_args
       measurements = variant_spec['measurements']
 
       output_file = None
       if script_args.save_traces:
         output_file = 'benchmark-%s-%s-%s.trace' % (
             benchmark_name.replace(' ', '_'),
             variant_name.replace(' ', '_'),
             time.strftime('%Y%m%d%H%M%S'))
+
+      chart_data_recorder = None
+      if script_args.upload:
+        chart_data_recorder = perf_dashboard.ChartDataRecorder(
+            script_args.test_name)
+
       benchmark_succeeded, benchmark_error, output = _run_benchmark(
           shell, shell_args, variant_name, app, duration, measurements,
           script_args.verbose, script_args.android, output_file)
 
       print '[ %s ] %s ' % (benchmark_name, variant_name)
 
       some_measurements_failed = False
       if benchmark_succeeded:
         measurement_results = _parse_measurement_results(output)
         # Iterate over the list of specs, not the dictionary, to detect missing
         # results and preserve the required order.
         for measurement_spec in measurements:
           if measurement_spec in measurement_results:
             result = measurement_results[measurement_spec]
             print '%s %s' % (measurement_spec, result)
 
             if chart_data_recorder:
-              measurement_name = measurement_spec.replace('/', '-')
-              chart_data_recorder.record_scalar(variant_name, measurement_name,
-                                                'ms', result)
+              chart_name = benchmark_name + '__' + variant_name
+              chart_data_recorder.record_scalar(
+                  perf_dashboard.normalize_label(chart_name),
+                  perf_dashboard.normalize_label(measurement_spec),
+                  'ms', result)
           else:
             print '%s ?' % measurement_spec
             some_measurements_failed = True
 
       if not benchmark_succeeded or some_measurements_failed:
         if not benchmark_succeeded:
           print 'benchmark failed: ' + benchmark_error
         if some_measurements_failed:
           print 'some measurements failed'
         print 'output: '
         print '-' * 72
         print output
         print '-' * 72
         exit_code = 1
 
-    if script_args.chart_data_output_file:
-      script_args.chart_data_output_file.write(
-          json.dumps(chart_data_recorder.get_chart_data()))
-      script_args.chart_data_output_file.write('\n')
+      if script_args.upload:
+        perf_dashboard.upload_chart_data(
+            script_args.master_name, script_args.bot_name,
+            script_args.test_name, script_args.builder_name,
+            script_args.build_number, chart_data_recorder.get_chart_data(),
+            script_args.server_url, script_args.dry_run)
 
   return exit_code
 
 if __name__ == '__main__':
   sys.exit(main())
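
The `perf_dashboard` helpers used above (`add_argparse_server_arguments`, `ChartDataRecorder`, `normalize_label`, `upload_chart_data`) live in devtoolslib and are not part of this file. The following is a rough sketch of the shape their call sites imply, assuming the chartjson layout and the dashboard's /add_point form parameter; it is illustrative only, not the actual devtoolslib code.

# Sketch of the devtoolslib perf_dashboard helpers called above, inferred
# from their call sites in this patch.  The chartjson layout and the
# /add_point form parameter are assumptions about the real module, which
# is not part of this diff.
import json
import re
import urllib
import urllib2


def normalize_label(label):
  # Assumed behaviour: dashboard chart/value names cannot contain '/' or
  # spaces, so collapse anything unsafe to underscores.
  return re.sub(r'[^\w.-]', '_', label)


class ChartDataRecorder(object):
  """Collects scalar results in the chartjson layout (assumed)."""

  def __init__(self, benchmark_name):
    self.benchmark_name = benchmark_name
    self.charts = {}

  def record_scalar(self, chart_name, value_name, units, value):
    chart = self.charts.setdefault(chart_name, {})
    chart[value_name] = {'type': 'scalar', 'units': units, 'value': value}

  def get_chart_data(self):
    return {'format_version': '1.0',
            'benchmark_name': self.benchmark_name,
            'charts': self.charts}


def upload_chart_data(master_name, bot_name, test_name, builder_name,
                      build_number, chart_data, server_url, dry_run):
  # One point per upload; the exact field set expected by the dashboard's
  # /add_point handler is an assumption here.
  point = {'master': master_name, 'bot': bot_name, 'test': test_name,
           'buildername': builder_name, 'buildnumber': build_number,
           'chart_data': chart_data}
  if dry_run:
    print json.dumps(point, indent=2)
    return
  urllib2.urlopen(server_url + '/add_point',
                  urllib.urlencode({'data': json.dumps([point])}))

With helpers of that shape, the upload option replaces writing chart data to a local file: each variant's results are normalized into a chart named `<benchmark>__<variant>` and uploaded with the configured master/bot/test/builder metadata.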