OLD | NEW |
---|---|
1 #!/usr/bin/env python | 1 #!/usr/bin/env python |
2 # Copyright 2015 The Chromium Authors. All rights reserved. | 2 # Copyright 2015 The Chromium Authors. All rights reserved. |
3 # Use of this source code is governed by a BSD-style license that can be | 3 # Use of this source code is governed by a BSD-style license that can be |
4 # found in the LICENSE file. | 4 # found in the LICENSE file. |
5 | 5 |
6 """Runner for Mojo application benchmarks.""" | 6 """Runner for Mojo application benchmarks.""" |
7 | 7 |
8 import argparse | 8 import argparse |
9 import logging | 9 import logging |
10 import sys | 10 import sys |
11 import time | 11 import time |
12 import os.path | 12 import os.path |
13 import re | 13 import re |
14 | 14 |
15 from devtoolslib import shell_arguments | 15 from devtoolslib import shell_arguments |
16 from devtoolslib import shell_config | 16 from devtoolslib import shell_config |
17 from devtoolslib import performance_dashboard | 17 from devtoolslib import perf_dashboard |
18 | 18 |
19 | 19 |
20 _DESCRIPTION = """Runner for Mojo application benchmarks. | 20 _DESCRIPTION = """Runner for Mojo application benchmarks. |
21 | 21 |
22 |benchmark_list_file| has to be a valid Python program that sets a |benchmarks| | 22 |benchmark_list_file| has to be a valid Python program that sets a |benchmarks| |
23 global variable, containing entries of the following form: | 23 global variable, containing entries of the following form: |
24 | 24 |
25 { | 25 { |
26 'name': '<name of the benchmark>', | 26 'name': '<name of the benchmark>', |
27 'app': '<url of the app to benchmark>', | 27 'app': '<url of the app to benchmark>', |
(...skipping 160 matching lines...) | |
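The docstring above describes the |benchmark_list_file| contract: a Python program that defines a |benchmarks| global. Most of the entry format is elided by the diff viewer, so the sketch below only fills in the keys the runner visibly reads further down ('name', 'app', 'duration', 'shell-args', 'measurements'); the URL, duration value, and conditional shell flag are illustrative assumptions, not values from this CL.

```python
# benchmark_list.py -- illustrative sketch only. The runner exec()s this file
# with a |target_os| variable ('android' or 'linux') already in scope, as seen
# at lines 195-196 of the new file below.
benchmarks = [
    {
        'name': 'example_startup',               # benchmark name (assumed)
        'app': 'https://example.org/app.mojo',   # hypothetical app url
        'duration': 10,                          # run time; units per the elided spec
        'shell-args': ['--example-flag'] if target_os == 'android' else [],
        'measurements': [
            # Measurement specs go here; their format is documented in the
            # part of _DESCRIPTION elided above, so none are invented here.
        ],
    },
]
```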
188 try: | 188 try: |
189 shell, common_shell_args = shell_arguments.get_shell(config, []) | 189 shell, common_shell_args = shell_arguments.get_shell(config, []) |
190 except shell_arguments.ShellConfigurationException as e: | 190 except shell_arguments.ShellConfigurationException as e: |
191 print e | 191 print e |
192 return 1 | 192 return 1 |
193 | 193 |
194 target_os = 'android' if script_args.android else 'linux' | 194 target_os = 'android' if script_args.android else 'linux' |
195 benchmark_list_params = {"target_os": target_os} | 195 benchmark_list_params = {"target_os": target_os} |
196 exec script_args.benchmark_list_file in benchmark_list_params | 196 exec script_args.benchmark_list_file in benchmark_list_params |
197 | 197 |
198 chart_data_recorder = None | |
199 if script_args.chart_data_output_file: | |
200 chart_data_recorder = performance_dashboard.ChartDataRecorder() | |
201 | |
202 exit_code = 0 | 198 exit_code = 0 |
203 for benchmark_spec in benchmark_list_params['benchmarks']: | 199 for benchmark_spec in benchmark_list_params['benchmarks']: |
204 for variant_spec in _generate_benchmark_variants(benchmark_spec): | 200 for variant_spec in _generate_benchmark_variants(benchmark_spec): |
205 name = variant_spec['name'] | 201 name = variant_spec['name'] |
206 app = variant_spec['app'] | 202 app = variant_spec['app'] |
207 duration = variant_spec['duration'] | 203 duration = variant_spec['duration'] |
208 shell_args = variant_spec.get('shell-args', []) + common_shell_args | 204 shell_args = variant_spec.get('shell-args', []) + common_shell_args |
209 measurements = variant_spec['measurements'] | 205 measurements = variant_spec['measurements'] |
206 | |
207 chart_data_recorder = None | |
208 if script_args.chart_data_output_file: | |
209 chart_data_recorder = perf_dashboard.ChartDataRecorder(name) | |
210 benchmark_succeeded, benchmark_error, output = _run_benchmark( | 210 benchmark_succeeded, benchmark_error, output = _run_benchmark( |
211 shell, shell_args, name, app, duration, measurements, | 211 shell, shell_args, name, app, duration, measurements, |
212 script_args.verbose, script_args.android, | 212 script_args.verbose, script_args.android, |
213 script_args.save_traces) | 213 script_args.save_traces) |
214 | 214 |
215 print '[ %s ]' % name | 215 print '[ %s ]' % name |
216 | 216 |
217 some_measurements_failed = False | 217 some_measurements_failed = False |
218 if benchmark_succeeded: | 218 if benchmark_succeeded: |
219 measurement_results = _parse_measurement_results(output) | 219 measurement_results = _parse_measurement_results(output) |
(...skipping 16 matching lines...) | |
236 if not benchmark_succeeded: | 236 if not benchmark_succeeded: |
237 print 'benchmark failed: ' + benchmark_error | 237 print 'benchmark failed: ' + benchmark_error |
238 if some_measurements_failed: | 238 if some_measurements_failed: |
239 print 'some measurements failed' | 239 print 'some measurements failed' |
240 print 'output: ' | 240 print 'output: ' |
241 print '-' * 72 | 241 print '-' * 72 |
242 print output | 242 print output |
243 print '-' * 72 | 243 print '-' * 72 |
244 exit_code = 1 | 244 exit_code = 1 |
245 | 245 |
246 if script_args.chart_data_output_file: | 246 if script_args.chart_data_output_file: |
247 script_args.chart_data_output_file.write(chart_data_recorder.get_json()) | 247 script_args.chart_data_output_file.write(chart_data_recorder.get_json()) |
etiennej (2015/10/23 10:02:12): Do you want to add a newline at the end of the jso… | |
ppi (2015/10/23 10:59:12): Done. | |
248 | 248 |
249 return exit_code | 249 return exit_code |
250 | 250 |
251 if __name__ == '__main__': | 251 if __name__ == '__main__': |
252 sys.exit(main()) | 252 sys.exit(main()) |
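For readers without devtoolslib at hand: the perf_dashboard.ChartDataRecorder used in this CL is not part of the diff, and the only surface the runner relies on is a one-argument constructor and get_json(). The following is a minimal sketch under those assumptions; the field layout and the record_scalar() helper (standing in for whatever the elided lines 220-235 call) are invented for illustration and are not devtoolslib's real schema.

```python
import json

class ChartDataRecorder(object):
    """Sketch of the recorder interface used above: the runner constructs
    one per benchmark variant and serializes it with get_json()."""

    def __init__(self, benchmark_name):
        self.benchmark_name = benchmark_name
        self.charts = {}

    def record_scalar(self, chart_name, value, units='ms'):
        # Hypothetical helper: append one scalar measurement to a chart.
        self.charts.setdefault(chart_name, []).append(
            {'type': 'scalar', 'value': value, 'units': units})

    def get_json(self):
        # Trailing newline appended, matching the request in the review
        # thread above ("Done.").
        return json.dumps({
            'benchmark_name': self.benchmark_name,
            'charts': self.charts,
        }) + '\n'
```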