Chromium Code Reviews

Diff: mojo/devtools/common/mojo_benchmark

Issue 1391013005: Benchmark: `--save-all-traces` argument (Closed) Base URL: https://github.com/domokit/mojo.git@master
Patch Set: benchmark (created 5 years, 1 month ago)
 #!/usr/bin/env python
 # Copyright 2015 The Chromium Authors. All rights reserved.
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.

 """Runner for Mojo application benchmarks."""

 import argparse
 import logging
 import os.path
(...skipping 57 matching lines...)
           'shell-args': benchmark_spec.get('shell-args',
                                            []) + _COLD_START_SHELL_ARGS})
   variants.append({
       'variant_name': 'warm start',
       'app': benchmark_spec['app'],
       'duration': benchmark_spec['duration'],
       'measurements': benchmark_spec['measurements'],
       'shell-args': benchmark_spec.get('shell-args', [])})
   return variants

-
ppi 2015/10/29 13:33:43  Keep this blank line, Python style guide wants two
nelly 2015/10/29 14:20:17  Done.
 def _run_benchmark(shell, shell_args, app, duration_seconds, measurements,
-                   verbose, android, output_file):
+                   verbose, android, output_file, trace_all):
   """Runs the given benchmark by running `benchmark.mojo` in mojo shell with
   appropriate arguments and returns the produced output.

   Returns:
     A tuple of (succeeded, error_msg, output).
   """
   timeout = duration_seconds + _EXTRA_TIMEOUT
   benchmark_args = []
   benchmark_args.append('--app=' + app)
   benchmark_args.append('--duration=' + str(duration_seconds))

   device_output_file = None
   if output_file:
     if android:
       device_output_file = os.path.join(shell.get_tmp_dir_path(), output_file)
       benchmark_args.append('--trace-output=' + device_output_file)
     else:
       benchmark_args.append('--trace-output=' + output_file)

+  if trace_all:
+    benchmark_args.append('--trace-all')
+
   for measurement in measurements:
     benchmark_args.append(measurement['spec'])

   shell_args = list(shell_args)
   shell_args.append(_BENCHMARK_APP)
   shell_args.append('--force-offline-by-default')
   shell_args.append('--args-for=%s %s' % (_BENCHMARK_APP,
                                           ' '.join(benchmark_args)))

   if verbose:
(...skipping 33 matching lines...)

 def main():
   parser = argparse.ArgumentParser(
       formatter_class=argparse.RawDescriptionHelpFormatter,
       description=_DESCRIPTION)
   parser.add_argument('benchmark_list_file', type=file,
                       help='a file listing benchmarks to run')
   parser.add_argument('--save-traces', action='store_true',
                       help='save the traces produced by benchmarks to disk')
+  parser.add_argument('--trace-all', action='store_true',
+                      help='trace all categories; ignored if not used with ' +
ppi 2015/10/29 13:33:43  Why ignore this if not used with --save-traces?
nelly 2015/10/29 14:20:17  With the current implementation we always print in
+                           'the `--save-traces` argument')
   perf_dashboard.add_argparse_server_arguments(parser)

   # Common shell configuration arguments.
   shell_config.add_shell_arguments(parser)
   script_args = parser.parse_args()
   config = shell_config.get_shell_config(script_args)

   try:
     shell, common_shell_args = shell_arguments.get_shell(config, [])
   except shell_arguments.ShellConfigurationException as e:
     print e
     return 1

   target_os = 'android' if script_args.android else 'linux'
   benchmark_list_params = {"target_os": target_os}
   exec script_args.benchmark_list_file in benchmark_list_params

   exit_code = 0
   for benchmark_spec in benchmark_list_params['benchmarks']:
     benchmark_name = benchmark_spec['name']

     for variant_spec in _generate_benchmark_variants(benchmark_spec):
       variant_name = variant_spec['variant_name']
       app = variant_spec['app']
       duration = variant_spec['duration']
       shell_args = variant_spec.get('shell-args', []) + common_shell_args
       measurements = variant_spec['measurements']

       output_file = None
+      trace_all = False
       if script_args.save_traces:
         output_file = 'benchmark-%s-%s-%s.trace' % (
             benchmark_name.replace(' ', '_'),
             variant_name.replace(' ', '_'),
             time.strftime('%Y%m%d%H%M%S'))
+        if script_args.trace_all:
+          trace_all = True
+      elif script_args.trace_all:
+        print 'warning: --trace-all argument ignored (see usage).'

       chart_data_recorder = None
       if script_args.upload:
         chart_data_recorder = perf_dashboard.ChartDataRecorder(
             script_args.test_name)

       benchmark_succeeded, benchmark_error, output = _run_benchmark(
           shell, shell_args, app, duration, measurements, script_args.verbose,
-          script_args.android, output_file)
+          script_args.android, output_file, trace_all)

       print '[ %s ] %s ' % (benchmark_name, variant_name)

       some_measurements_failed = False
       if benchmark_succeeded:
         measurement_results = _parse_measurement_results(output)
         # Iterate over the list of specs, not the dictionary, to detect missing
         # results and preserve the required order.
         for measurement in measurements:
           if measurement['spec'] in measurement_results:
(...skipping 25 matching lines...)
           perf_dashboard.upload_chart_data(
               script_args.master_name, script_args.bot_name,
               script_args.test_name, script_args.builder_name,
               script_args.build_number, chart_data_recorder.get_chart_data(),
               script_args.server_url, script_args.dry_run)

   return exit_code

 if __name__ == '__main__':
   sys.exit(main())
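
For quick reference, here is a minimal, self-contained sketch of the behavior this patch adds. The flag names, the gating on --save-traces, and the warning text are taken from the diff above; the standalone parser setup and the sample argv are illustrative assumptions, not the actual script.

# Minimal sketch of the --trace-all gating introduced in this patch.
# Only the flag names, the gating logic, and the warning text come from
# the diff; the parser below and the sample argv are hypothetical.
import argparse

parser = argparse.ArgumentParser()
parser.add_argument('--save-traces', action='store_true')
parser.add_argument('--trace-all', action='store_true')

# Hypothetical invocation: --trace-all passed without --save-traces.
script_args = parser.parse_args(['--trace-all'])

trace_all = False
if script_args.save_traces:
    # A trace output file will be written, so tracing all categories
    # has somewhere to go.
    trace_all = script_args.trace_all
elif script_args.trace_all:
    # Without --save-traces no trace file is written, hence the warning.
    print('warning: --trace-all argument ignored (see usage).')

benchmark_args = []
if trace_all:
    # _run_benchmark() forwards this to benchmark.mojo via --args-for.
    benchmark_args.append('--trace-all')

A full run would then look something like `mojo_benchmark benchmark_list --save-traces --trace-all`, where `benchmark_list` stands in for an actual benchmark list file.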
