Chromium Code Reviews

| OLD | NEW |
|---|---|
| 1 #!/usr/bin/env python | 1 #!/usr/bin/env python |
| 2 # Copyright 2015 The Chromium Authors. All rights reserved. | 2 # Copyright 2015 The Chromium Authors. All rights reserved. |
| 3 # Use of this source code is governed by a BSD-style license that can be | 3 # Use of this source code is governed by a BSD-style license that can be |
| 4 # found in the LICENSE file. | 4 # found in the LICENSE file. |
| 5 | 5 |
| 6 """Runner for Mojo application benchmarks.""" | 6 """Runner for Mojo application benchmarks.""" |
| 7 | 7 |
| 8 import argparse | 8 import argparse |
| 9 import logging | 9 import logging |
| 10 import sys | 10 import sys |
| (...skipping 32 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 43 ['android', 'linux'], indicating the system on which the benchmarks are to be | 43 ['android', 'linux'], indicating the system on which the benchmarks are to be |
| 44 run. | 44 run. |
| 45 """ | 45 """ |
| 46 | 46 |
| 47 _logger = logging.getLogger() | 47 _logger = logging.getLogger() |
| 48 | 48 |
| 49 _BENCHMARK_APP = 'https://core.mojoapps.io/benchmark.mojo' | 49 _BENCHMARK_APP = 'https://core.mojoapps.io/benchmark.mojo' |
| 50 _CACHE_SERVICE_URL = 'mojo:url_response_disk_cache' | 50 _CACHE_SERVICE_URL = 'mojo:url_response_disk_cache' |
| 51 _NETWORK_SERVICE_URL = 'mojo:network_service' | 51 _NETWORK_SERVICE_URL = 'mojo:network_service' |
| 52 | 52 |
| 53 _COLD_START_SHELL_ARGS = [ | |
| 54 '--args-for=%s %s' % (_CACHE_SERVICE_URL, '--clear'), | |
| 55 '--args-for=%s %s' % (_NETWORK_SERVICE_URL, '--clear'), | |
| 56 ] | |
| 57 | |
| 53 # Additional time in seconds allocated per shell run to accommodate start-up. | 58 # Additional time in seconds allocated per shell run to accommodate start-up. |
| 54 # The shell should terminate before hitting this time out, it is an error if it | 59 # The shell should terminate before hitting this time out, it is an error if it |
| 55 # doesn't. | 60 # doesn't. |
| 56 _EXTRA_TIMEOUT = 20 | 61 _EXTRA_TIMEOUT = 20 |
| 57 | 62 |
| 58 | 63 |
| 59 def _get_output_file(shell, name, cold_start): | 64 def _rewrite_benchmark_list(benchmark_list): |
| 60 file_name = 'benchmark-%s-%s-%s.trace' % ( | 65 """Rewrites the specification of benchmarks to run, yielding two benchmarks |
| 61 name.replace(' ', '_'), | 66 (one for cold and one for warm start) for each benchmark defined in the input |
| 62 'cold_start' if cold_start else 'warm_start', | 67 list. |
| 63 time.strftime('%Y%m%d%H%M%S')) | 68 """ |
| 64 return file_name | 69 result = list() |
|
viettrungluu
2015/10/14 21:24:16
Probably `= []` would look more normal.
ppi
2015/10/14 21:47:27
Done.
| |
| 70 for benchmark_spec in benchmark_list: | |
|
viettrungluu
2015/10/14 21:24:16
I wonder if you shouldn't have the function be: _g… [comment truncated in this capture]
ppi
2015/10/14 21:47:27
Sg, done.
| |
| 71 # Cold start. | |
| 72 result.append({ | |
| 73 'name': benchmark_spec['name'] + ' (cold start)', | |
| 74 'app': benchmark_spec['app'], | |
| 75 'duration': benchmark_spec['duration'], | |
| 76 'measurements': benchmark_spec['measurements'], | |
| 77 'shell-args': benchmark_spec.get('shell-args', | |
| 78 []) + _COLD_START_SHELL_ARGS}) | |
| 79 # Warm start. | |
| 80 result.append({ | |
| 81 'name': benchmark_spec['name'] + ' (warm start)', | |
| 82 'app': benchmark_spec['app'], | |
| 83 'duration': benchmark_spec['duration'], | |
| 84 'measurements': benchmark_spec['measurements'], | |
| 85 'shell-args': benchmark_spec.get('shell-args', [])}) | |
| 86 return result | |
| 65 | 87 |
| 66 | 88 |
| 67 def _run_benchmark(shell, shell_args, name, app, duration_seconds, measurements, | 89 def _run_benchmark(shell, shell_args, name, app, duration_seconds, measurements, |
| 68 cold_start, verbose, android, save_traces): | 90 verbose, android, save_traces): |
| 69 """Runs `benchmark.mojo` in shell with correct arguments, parses and | 91 """Runs `benchmark.mojo` in shell with correct arguments, parses and |
| 70 presents the benchmark results. | 92 presents the benchmark results. |
| 71 """ | 93 """ |
| 72 timeout = duration_seconds + _EXTRA_TIMEOUT | 94 timeout = duration_seconds + _EXTRA_TIMEOUT |
| 73 benchmark_args = [] | 95 benchmark_args = [] |
| 74 benchmark_args.append('--app=' + app) | 96 benchmark_args.append('--app=' + app) |
| 75 benchmark_args.append('--duration=' + str(duration_seconds)) | 97 benchmark_args.append('--duration=' + str(duration_seconds)) |
| 76 | 98 |
| 77 output_file = None | 99 output_file = None |
| 78 device_output_file = None | 100 device_output_file = None |
| 79 if save_traces: | 101 if save_traces: |
| 80 output_file = _get_output_file(shell, name, cold_start) | 102 output_file = 'benchmark-%s-%s.trace' % (name.replace(' ', '_'), |
| 103 time.strftime('%Y%m%d%H%M%S')) | |
| 81 if android: | 104 if android: |
| 82 device_output_file = os.path.join(shell.get_tmp_dir_path(), output_file) | 105 device_output_file = os.path.join(shell.get_tmp_dir_path(), output_file) |
| 83 benchmark_args.append('--trace-output=' + device_output_file) | 106 benchmark_args.append('--trace-output=' + device_output_file) |
| 84 else: | 107 else: |
| 85 benchmark_args.append('--trace-output=' + output_file) | 108 benchmark_args.append('--trace-output=' + output_file) |
| 86 | 109 |
| 87 for measurement in measurements: | 110 for measurement in measurements: |
| 88 benchmark_args.append(measurement) | 111 benchmark_args.append(measurement) |
| 89 | 112 |
| 90 shell_args = list(shell_args) | 113 shell_args = list(shell_args) |
| 91 shell_args.append(_BENCHMARK_APP) | 114 shell_args.append(_BENCHMARK_APP) |
| 92 shell_args.append('--force-offline-by-default') | 115 shell_args.append('--force-offline-by-default') |
| 93 shell_args.append('--args-for=%s %s' % (_BENCHMARK_APP, | 116 shell_args.append('--args-for=%s %s' % (_BENCHMARK_APP, |
| 94 ' '.join(benchmark_args))) | 117 ' '.join(benchmark_args))) |
| 95 | 118 |
| 96 if cold_start: | |
| 97 shell_args.append('--args-for=%s %s' % (_CACHE_SERVICE_URL, '--clear')) | |
| 98 shell_args.append('--args-for=%s %s' % (_NETWORK_SERVICE_URL, '--clear')) | |
| 99 | |
| 100 if verbose: | 119 if verbose: |
| 101 print 'shell arguments: ' + str(shell_args) | 120 print 'shell arguments: ' + str(shell_args) |
| 102 print '[ %s ] %s' % (name, 'cold start' if cold_start else 'warm start') | 121 print '[ %s ]' % name |
| 103 return_code, output, did_time_out = shell.run_and_get_output( | 122 return_code, output, did_time_out = shell.run_and_get_output( |
| 104 shell_args, timeout=timeout) | 123 shell_args, timeout=timeout) |
| 105 output_lines = [line.strip() for line in output.split('\n')] | 124 output_lines = [line.strip() for line in output.split('\n')] |
| 106 | 125 |
| 107 if return_code or did_time_out or 'benchmark succeeded' not in output_lines: | 126 if return_code or did_time_out or 'benchmark succeeded' not in output_lines: |
| 108 print 'timed out' if did_time_out else 'failed' | 127 print 'timed out' if did_time_out else 'failed' |
| 109 if return_code: | 128 if return_code: |
| 110 print 'Return code: ' + str(return_code) | 129 print 'Return code: ' + str(return_code) |
| 111 print 'Output: ' | 130 print 'Output: ' |
| 112 print output | 131 print output |
| (...skipping 26 matching lines...) Expand all Loading... | |
| 139 | 158 |
| 140 try: | 159 try: |
| 141 shell, common_shell_args = shell_arguments.get_shell(config, []) | 160 shell, common_shell_args = shell_arguments.get_shell(config, []) |
| 142 except shell_arguments.ShellConfigurationException as e: | 161 except shell_arguments.ShellConfigurationException as e: |
| 143 print e | 162 print e |
| 144 return 1 | 163 return 1 |
| 145 | 164 |
| 146 target_os = 'android' if script_args.android else 'linux' | 165 target_os = 'android' if script_args.android else 'linux' |
| 147 benchmark_list_params = {"target_os": target_os} | 166 benchmark_list_params = {"target_os": target_os} |
| 148 exec script_args.benchmark_list_file in benchmark_list_params | 167 exec script_args.benchmark_list_file in benchmark_list_params |
| 149 benchmark_list = benchmark_list_params['benchmarks'] | 168 benchmark_list = _rewrite_benchmark_list(benchmark_list_params['benchmarks']) |
| 150 | 169 |
| 151 succeeded = True | 170 succeeded = True |
| 152 for benchmark_spec in benchmark_list: | 171 for benchmark_spec in benchmark_list: |
| 153 name = benchmark_spec['name'] | 172 name = benchmark_spec['name'] |
| 154 app = benchmark_spec['app'] | 173 app = benchmark_spec['app'] |
| 155 duration = benchmark_spec['duration'] | 174 duration = benchmark_spec['duration'] |
| 156 shell_args = benchmark_spec.get('shell-args', []) + common_shell_args | 175 shell_args = benchmark_spec.get('shell-args', []) + common_shell_args |
| 157 measurements = benchmark_spec['measurements'] | 176 measurements = benchmark_spec['measurements'] |
| 158 _run_benchmark(shell, shell_args, name, app, duration, measurements, | 177 _run_benchmark(shell, shell_args, name, app, duration, measurements, |
| 159 cold_start=True, verbose=script_args.verbose, | 178 verbose=script_args.verbose, android=script_args.android, |
|
viettrungluu
2015/10/14 21:24:16
I find it odd that you give these things as keywor… [comment truncated in this capture]
ppi
2015/10/14 21:47:27
Done.
| |
| 160 android=script_args.android, | |
| 161 save_traces=script_args.save_traces) | |
| 162 _run_benchmark(shell, shell_args, name, app, duration, measurements, | |
| 163 cold_start=False, verbose=script_args.verbose, | |
| 164 android=script_args.android, | |
| 165 save_traces=script_args.save_traces) | 179 save_traces=script_args.save_traces) |
| 166 | 180 |
| 167 return 0 if succeeded else 1 | 181 return 0 if succeeded else 1 |
| 168 | 182 |
| 169 if __name__ == '__main__': | 183 if __name__ == '__main__': |
| 170 sys.exit(main()) | 184 sys.exit(main()) |
| OLD | NEW |