Chromium Code Reviews

Unified Diff: mojo/devtools/common/mojo_benchmark

Issue 1406063002: Teach mojo_benchmark to produce chart_data for the perf dashboard. (Closed)
Base URL: https://github.com/domokit/mojo.git@master
Patch Set: Rebase. Created 5 years, 2 months ago
 #!/usr/bin/env python
 # Copyright 2015 The Chromium Authors. All rights reserved.
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.

 """Runner for Mojo application benchmarks."""

 import argparse
 import logging
 import sys
 import time
 import os.path
 import re

 from devtoolslib import shell_arguments
 from devtoolslib import shell_config
+from devtoolslib import performance_dashboard


 _DESCRIPTION = """Runner for Mojo application benchmarks.

 |benchmark_list_file| has to be a valid Python program that sets a |benchmarks|
 global variable, containing entries of the following form:

   {
     'name': '<name of the benchmark>',
     'app': '<url of the app to benchmark>',
(...skipping 141 matching lines...)
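To make the entry format sketched in the docstring above concrete, a minimal benchmark list file providing the fields this script reads (name, app, duration, shell-args, measurements) could look like the sketch below. Every value is an illustrative placeholder rather than anything from this CL, and the exact measurement spec syntax is described in the portion of the file skipped here.

# Hypothetical benchmark list file for mojo_benchmark; all values below are
# placeholders. The file is exec'd with |target_os| injected as a global
# (see main() below), so entries may be varied per platform.
benchmarks = [
  {
    'name': 'example_app startup',
    'app': 'https://example.org/example_app.mojo',  # placeholder application url
    'duration': 10,  # how long to run; units as documented in the skipped docstring
    'shell-args': [],  # extra shell arguments for this benchmark
    'measurements': [
        # Placeholder measurement spec; real specs follow the syntax described
        # in the skipped part of _DESCRIPTION above.
        'time_until/example_category/example_event',
    ],
  },
]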


 def main():
   parser = argparse.ArgumentParser(
       formatter_class=argparse.RawDescriptionHelpFormatter,
       description=_DESCRIPTION)
   parser.add_argument('benchmark_list_file', type=file,
                       help='a file listing benchmarks to run')
   parser.add_argument('--save-traces', action='store_true',
                       help='save the traces produced by benchmarks to disk')
+  parser.add_argument('--chart-data-output-file', type=argparse.FileType('w'),
+                      help='file to write chart data for the performance '
+                           'dashboard to')

   # Common shell configuration arguments.
   shell_config.add_shell_arguments(parser)
   script_args = parser.parse_args()
   config = shell_config.get_shell_config(script_args)

   try:
     shell, common_shell_args = shell_arguments.get_shell(config, [])
   except shell_arguments.ShellConfigurationException as e:
     print e
     return 1

   target_os = 'android' if script_args.android else 'linux'
   benchmark_list_params = {"target_os": target_os}
   exec script_args.benchmark_list_file in benchmark_list_params

+  chart_data_recorder = None
+  if script_args.chart_data_output_file:
+    chart_data_recorder = performance_dashboard.ChartDataRecorder()
+
   exit_code = 0
   for benchmark_spec in benchmark_list_params['benchmarks']:
     for variant_spec in _generate_benchmark_variants(benchmark_spec):
       name = variant_spec['name']
       app = variant_spec['app']
       duration = variant_spec['duration']
       shell_args = variant_spec.get('shell-args', []) + common_shell_args
       measurements = variant_spec['measurements']
       benchmark_succeeded, benchmark_error, output = _run_benchmark(
           shell, shell_args, name, app, duration, measurements,
           script_args.verbose, script_args.android,
           script_args.save_traces)

       print '[ %s ]' % name

       some_measurements_failed = False
       if benchmark_succeeded:
         measurement_results = _parse_measurement_results(output)
         # Iterate over the list of specs, not the dictionary, to detect missing
         # results and preserve the required order.
         for measurement_spec in measurements:
           if measurement_spec in measurement_results:
-            print '%s %s' % (measurement_spec,
-                             measurement_results[measurement_spec])
+            result = measurement_results[measurement_spec]
+            print '%s %s' % (measurement_spec, result)
+
+            if chart_data_recorder:
+              measurement_name = measurement_spec.replace('/', '-')
+              chart_data_recorder.record_scalar(name, measurement_name, 'ms',
+                                                result)
           else:
             print '%s ?' % measurement_spec
             some_measurements_failed = True

       if not benchmark_succeeded or some_measurements_failed:
         if not benchmark_succeeded:
           print 'benchmark failed: ' + benchmark_error
         if some_measurements_failed:
           print 'some measurements failed'
         print 'output: '
         print '-' * 72
         print output
         print '-' * 72
         exit_code = 1

+  if script_args.chart_data_output_file:
+    script_args.chart_data_output_file.write(chart_data_recorder.get_json())
+
   return exit_code

 if __name__ == '__main__':
   sys.exit(main())
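The ChartDataRecorder used above (a no-argument constructor, record_scalar(chart_name, value_name, units, value), and get_json()) is defined in devtoolslib rather than in this file. A minimal sketch of a recorder exposing that interface, under the assumption that the dashboard accepts a plain dictionary of scalar chart values, might look like this; the JSON layout is an assumption, not necessarily the schema the devtoolslib module produces.

import json


class ChartDataRecorder(object):
  """Sketch of a recorder matching the calls made by mojo_benchmark above.

  Not the devtoolslib implementation; the real JSON schema may differ.
  """

  def __init__(self):
    self._charts = {}

  def record_scalar(self, chart_name, value_name, units, value):
    # Group each recorded value under its chart (the benchmark variant name).
    chart = self._charts.setdefault(chart_name, {})
    chart[value_name] = {'type': 'scalar', 'units': units, 'value': value}

  def get_json(self):
    # Serialize everything recorded so far as a single JSON document.
    return json.dumps({'charts': self._charts})

With the flag added in this patch, a run along the lines of "mojo_benchmark benchmark_list --chart-data-output-file chart_data.json" (both file names here are placeholders) would write the recorded scalars to the given file as JSON.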