OLD | NEW |
1 #!/usr/bin/env python | 1 #!/usr/bin/env python |
2 # Copyright 2015 The Chromium Authors. All rights reserved. | 2 # Copyright 2015 The Chromium Authors. All rights reserved. |
3 # Use of this source code is governed by a BSD-style license that can be | 3 # Use of this source code is governed by a BSD-style license that can be |
4 # found in the LICENSE file. | 4 # found in the LICENSE file. |
5 | 5 |
6 """Runner for Mojo application benchmarks.""" | 6 """Runner for Mojo application benchmarks.""" |
7 | 7 |
8 import argparse | 8 import argparse |
9 import logging | 9 import logging |
10 import sys | 10 import sys |
11 import time | 11 import time |
| 12 import os.path |
12 | 13 |
13 from devtoolslib import shell_arguments | 14 from devtoolslib import shell_arguments |
14 from devtoolslib import shell_config | 15 from devtoolslib import shell_config |
15 | 16 |
16 | 17 |
17 _DESCRIPTION = """Runner for Mojo application benchmarks. | 18 _DESCRIPTION = """Runner for Mojo application benchmarks. |
18 | 19 |
19 |benchmark_list_file| has to be a valid Python program that sets a |benchmarks| | 20 |benchmark_list_file| has to be a valid Python program that sets a |benchmarks| |
20 global variable, containing entries of the following form: | 21 global variable, containing entries of the following form: |
21 | 22 |
(...skipping 24 matching lines...) Expand all Loading... |
46 _BENCHMARK_APP = 'https://core.mojoapps.io/benchmark.mojo' | 47 _BENCHMARK_APP = 'https://core.mojoapps.io/benchmark.mojo' |
47 _CACHE_SERVICE_URL = 'mojo:url_response_disk_cache' | 48 _CACHE_SERVICE_URL = 'mojo:url_response_disk_cache' |
48 _NETWORK_SERVICE_URL = 'mojo:network_service' | 49 _NETWORK_SERVICE_URL = 'mojo:network_service' |
49 | 50 |
50 # Additional time in seconds allocated per shell run to accommodate start-up. | 51 # Additional time in seconds allocated per shell run to accommodate start-up. |
51 # The shell should terminate before hitting this time out, it is an error if it | 52 # The shell should terminate before hitting this time out, it is an error if it |
52 # doesn't. | 53 # doesn't. |
53 _EXTRA_TIMEOUT = 20 | 54 _EXTRA_TIMEOUT = 20 |
54 | 55 |
55 | 56 |
| 57 def _get_output_file(shell, name, cold_start): |
| 58 file_name = 'benchmark-%s-%s-%s.trace' % ( |
| 59 name.replace(' ', '_'), |
| 60 'cold_start' if cold_start else 'warm_start', |
| 61 time.strftime('%Y%m%d%H%M%S')) |
| 62 return file_name |
| 63 |
| 64 |
56 def _run_benchmark(shell, shell_args, name, app, duration_seconds, measurements, | 65 def _run_benchmark(shell, shell_args, name, app, duration_seconds, measurements, |
57 cold_start, verbose, save_traces): | 66 cold_start, verbose, android, save_traces): |
58 """Runs `benchmark.mojo` in shell with correct arguments, parses and | 67 """Runs `benchmark.mojo` in shell with correct arguments, parses and |
59 presents the benchmark results. | 68 presents the benchmark results. |
60 """ | 69 """ |
61 timeout = duration_seconds + _EXTRA_TIMEOUT | 70 timeout = duration_seconds + _EXTRA_TIMEOUT |
62 benchmark_args = [] | 71 benchmark_args = [] |
63 benchmark_args.append('--app=' + app) | 72 benchmark_args.append('--app=' + app) |
64 benchmark_args.append('--duration=' + str(duration_seconds)) | 73 benchmark_args.append('--duration=' + str(duration_seconds)) |
| 74 |
| 75 output_file = None |
| 76 device_output_file = None |
65 if save_traces: | 77 if save_traces: |
66 trace_output_file = 'benchmark-%s-%s-%s.trace' % ( | 78 output_file = _get_output_file(shell, name, cold_start) |
67 name.replace(' ', '_'), | 79 if android: |
68 'cold_start' if cold_start else 'warm_start', | 80 device_output_file = os.path.join(shell.get_tmp_dir_path(), output_file) |
69 time.strftime('%Y%m%d%H%M%S')) | 81 benchmark_args.append('--trace-output=' + device_output_file) |
70 benchmark_args.append('--trace-output=' + trace_output_file) | 82 else: |
| 83 benchmark_args.append('--trace-output=' + output_file) |
71 | 84 |
72 for measurement in measurements: | 85 for measurement in measurements: |
73 benchmark_args.append(measurement) | 86 benchmark_args.append(measurement) |
74 | 87 |
75 shell_args = list(shell_args) | 88 shell_args = list(shell_args) |
76 shell_args.append(_BENCHMARK_APP) | 89 shell_args.append(_BENCHMARK_APP) |
77 shell_args.append('--force-offline-by-default') | 90 shell_args.append('--force-offline-by-default') |
78 shell_args.append('--args-for=%s %s' % (_BENCHMARK_APP, | 91 shell_args.append('--args-for=%s %s' % (_BENCHMARK_APP, |
79 ' '.join(benchmark_args))) | 92 ' '.join(benchmark_args))) |
80 | 93 |
(...skipping 14 matching lines...) Expand all Loading... |
95 print 'Return code: ' + str(return_code) | 108 print 'Return code: ' + str(return_code) |
96 print 'Output: ' | 109 print 'Output: ' |
97 print output | 110 print output |
98 print '-' * 72 | 111 print '-' * 72 |
99 return False | 112 return False |
100 | 113 |
101 # Echo measurement results. | 114 # Echo measurement results. |
102 for line in output_lines: | 115 for line in output_lines: |
103 if line.strip().startswith('measurement:') or 'WARNING' in line: | 116 if line.strip().startswith('measurement:') or 'WARNING' in line: |
104 print line | 117 print line |
| 118 |
| 119 if device_output_file: |
| 120 shell.pull_file(device_output_file, output_file, remove_original=True) |
105 return True | 121 return True |
106 | 122 |
107 | 123 |
108 def main(): | 124 def main(): |
109 parser = argparse.ArgumentParser( | 125 parser = argparse.ArgumentParser( |
110 formatter_class=argparse.RawDescriptionHelpFormatter, | 126 formatter_class=argparse.RawDescriptionHelpFormatter, |
111 description=_DESCRIPTION) | 127 description=_DESCRIPTION) |
112 parser.add_argument('benchmark_list_file', type=file, | 128 parser.add_argument('benchmark_list_file', type=file, |
113 help='a file listing benchmarks to run') | 129 help='a file listing benchmarks to run') |
114 parser.add_argument('--save-traces', action='store_true', | 130 parser.add_argument('--save-traces', action='store_true', |
(...skipping 17 matching lines...) Expand all Loading... |
132 | 148 |
133 succeeded = True | 149 succeeded = True |
134 for benchmark_spec in benchmark_list: | 150 for benchmark_spec in benchmark_list: |
135 name = benchmark_spec['name'] | 151 name = benchmark_spec['name'] |
136 app = benchmark_spec['app'] | 152 app = benchmark_spec['app'] |
137 duration = benchmark_spec['duration'] | 153 duration = benchmark_spec['duration'] |
138 shell_args = benchmark_spec.get('shell-args', []) + common_shell_args | 154 shell_args = benchmark_spec.get('shell-args', []) + common_shell_args |
139 measurements = benchmark_spec['measurements'] | 155 measurements = benchmark_spec['measurements'] |
140 _run_benchmark(shell, shell_args, name, app, duration, measurements, | 156 _run_benchmark(shell, shell_args, name, app, duration, measurements, |
141 cold_start=True, verbose=script_args.verbose, | 157 cold_start=True, verbose=script_args.verbose, |
| 158 android=script_args.android, |
142 save_traces=script_args.save_traces) | 159 save_traces=script_args.save_traces) |
143 _run_benchmark(shell, shell_args, name, app, duration, measurements, | 160 _run_benchmark(shell, shell_args, name, app, duration, measurements, |
144 cold_start=False, verbose=script_args.verbose, | 161 cold_start=False, verbose=script_args.verbose, |
| 162 android=script_args.android, |
145 save_traces=script_args.save_traces) | 163 save_traces=script_args.save_traces) |
146 | 164 |
147 return 0 if succeeded else 1 | 165 return 0 if succeeded else 1 |
148 | 166 |
# Run only when executed as a script; propagate main()'s status to the shell.
if __name__ == '__main__':
  sys.exit(main())
OLD | NEW |