Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(84)

Side by Side Diff: mojo/devtools/common/mojo_benchmark

Issue 1347063002: Teach `benchmark.mojo` to save the collected trace file to disk. (Closed) Base URL: git@github.com:domokit/mojo.git@master
Patch Set: Created 5 years, 3 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch
« no previous file with comments | « apps/benchmark/run_args.cc ('k') | no next file » | no next file with comments »
Toggle Intra-line Diffs ('i') | Expand Comments ('e') | Collapse Comments ('c') | Show Comments Hide Comments ('s')
OLDNEW
1 #!/usr/bin/env python 1 #!/usr/bin/env python
2 # Copyright 2015 The Chromium Authors. All rights reserved. 2 # Copyright 2015 The Chromium Authors. All rights reserved.
3 # Use of this source code is governed by a BSD-style license that can be 3 # Use of this source code is governed by a BSD-style license that can be
4 # found in the LICENSE file. 4 # found in the LICENSE file.
5 5
6 """Runner for Mojo application benchmarks.""" 6 """Runner for Mojo application benchmarks."""
7 7
8 import argparse 8 import argparse
9 import logging 9 import logging
10 import sys 10 import sys
11 import time
11 12
12 from devtoolslib import shell_arguments 13 from devtoolslib import shell_arguments
13 from devtoolslib import shell_config 14 from devtoolslib import shell_config
14 15
15 16
16 _DESCRIPTION = """Runner for Mojo application benchmarks. 17 _DESCRIPTION = """Runner for Mojo application benchmarks.
17 18
18 |benchmark_list_file| has to be a valid Python program that sets a |benchmarks| 19 |benchmark_list_file| has to be a valid Python program that sets a |benchmarks|
19 global variable, containing entries of the following form: 20 global variable, containing entries of the following form:
20 21
(...skipping 24 matching lines...) Expand all
# Harness app driven by this script; it is handed to the shell along with the
# per-benchmark arguments and performs the actual measurements.
_BENCHMARK_APP = 'https://core.mojoapps.io/benchmark.mojo'
# Cache service passed '--clear' before a run to force a cold start.
_CACHE_SERVICE_URL = 'mojo:url_response_disk_cache'

# Additional time in seconds allocated per shell run to accommodate start-up.
# The shell should terminate before hitting this time out, it is an error if it
# doesn't.
_EXTRA_TIMEOUT = 20
52 53
53 54
54 def _run_benchmark(shell, shell_args, name, app, duration_seconds, measurements, 55 def _run_benchmark(shell, shell_args, name, app, duration_seconds, measurements,
55 cold_start, verbose): 56 cold_start, verbose, save_traces):
56 """Runs `benchmark.mojo` in shell with correct arguments, parses and 57 """Runs `benchmark.mojo` in shell with correct arguments, parses and
57 presents the benchmark results. 58 presents the benchmark results.
58 """ 59 """
59 timeout = duration_seconds + _EXTRA_TIMEOUT 60 timeout = duration_seconds + _EXTRA_TIMEOUT
60 benchmark_args = [] 61 benchmark_args = []
61 benchmark_args.append('--app=' + app) 62 benchmark_args.append('--app=' + app)
62 benchmark_args.append('--duration=' + str(duration_seconds)) 63 benchmark_args.append('--duration=' + str(duration_seconds))
64 if save_traces:
65 trace_output_file = 'benchmark-%s-%s-%s.trace' % (
66 name.replace(' ', '_'),
67 'cold_start' if cold_start else 'warm_start',
68 time.strftime('%Y%m%d%H%M%S'))
69 benchmark_args.append('--trace-output=' + trace_output_file)
70
63 for measurement in measurements: 71 for measurement in measurements:
64 benchmark_args.append(measurement) 72 benchmark_args.append(measurement)
65 73
66 shell_args = list(shell_args) 74 shell_args = list(shell_args)
67 shell_args.append(_BENCHMARK_APP) 75 shell_args.append(_BENCHMARK_APP)
68 shell_args.append('--args-for=%s %s' % (_BENCHMARK_APP, 76 shell_args.append('--args-for=%s %s' % (_BENCHMARK_APP,
69 ' '.join(benchmark_args))) 77 ' '.join(benchmark_args)))
70 78
71 if cold_start: 79 if cold_start:
72 shell_args.append('--args-for=%s %s' % (_CACHE_SERVICE_URL, '--clear')) 80 shell_args.append('--args-for=%s %s' % (_CACHE_SERVICE_URL, '--clear'))
(...skipping 20 matching lines...) Expand all
93 print line 101 print line
94 return True 102 return True
95 103
96 104
97 def main(): 105 def main():
98 parser = argparse.ArgumentParser( 106 parser = argparse.ArgumentParser(
99 formatter_class=argparse.RawDescriptionHelpFormatter, 107 formatter_class=argparse.RawDescriptionHelpFormatter,
100 description=_DESCRIPTION) 108 description=_DESCRIPTION)
101 parser.add_argument('benchmark_list_file', type=file, 109 parser.add_argument('benchmark_list_file', type=file,
102 help='a file listing benchmarks to run') 110 help='a file listing benchmarks to run')
111 parser.add_argument('--save-traces', action='store_true',
112 help='save the traces produced by benchmarks to disk')
103 113
104 # Common shell configuration arguments. 114 # Common shell configuration arguments.
105 shell_config.add_shell_arguments(parser) 115 shell_config.add_shell_arguments(parser)
106 script_args = parser.parse_args() 116 script_args = parser.parse_args()
107 config = shell_config.get_shell_config(script_args) 117 config = shell_config.get_shell_config(script_args)
108 118
109 try: 119 try:
110 shell, common_shell_args = shell_arguments.get_shell(config, []) 120 shell, common_shell_args = shell_arguments.get_shell(config, [])
111 except shell_arguments.ShellConfigurationException as e: 121 except shell_arguments.ShellConfigurationException as e:
112 print e 122 print e
113 return 1 123 return 1
114 124
115 target_os = 'android' if script_args.android else 'linux' 125 target_os = 'android' if script_args.android else 'linux'
116 benchmark_list_params = {"target_os": target_os} 126 benchmark_list_params = {"target_os": target_os}
117 exec script_args.benchmark_list_file in benchmark_list_params 127 exec script_args.benchmark_list_file in benchmark_list_params
118 benchmark_list = benchmark_list_params['benchmarks'] 128 benchmark_list = benchmark_list_params['benchmarks']
119 129
120 succeeded = True 130 succeeded = True
121 for benchmark_spec in benchmark_list: 131 for benchmark_spec in benchmark_list:
122 name = benchmark_spec['name'] 132 name = benchmark_spec['name']
123 app = benchmark_spec['app'] 133 app = benchmark_spec['app']
124 duration = benchmark_spec['duration'] 134 duration = benchmark_spec['duration']
125 shell_args = benchmark_spec.get('shell-args', []) + common_shell_args 135 shell_args = benchmark_spec.get('shell-args', []) + common_shell_args
126 measurements = benchmark_spec['measurements'] 136 measurements = benchmark_spec['measurements']
127 _run_benchmark(shell, shell_args, name, app, duration, measurements, 137 _run_benchmark(shell, shell_args, name, app, duration, measurements,
128 cold_start=True, verbose=script_args.verbose) 138 cold_start=True, verbose=script_args.verbose,
139 save_traces=script_args.save_traces)
129 _run_benchmark(shell, shell_args, name, app, duration, measurements, 140 _run_benchmark(shell, shell_args, name, app, duration, measurements,
130 cold_start=False, verbose=script_args.verbose) 141 cold_start=False, verbose=script_args.verbose,
142 save_traces=script_args.save_traces)
131 143
132 return 0 if succeeded else 1 144 return 0 if succeeded else 1
133 145
# Script entry point: propagate main()'s exit code to the calling shell.
if __name__ == '__main__':
  sys.exit(main())
OLDNEW
« no previous file with comments | « apps/benchmark/run_args.cc ('k') | no next file » | no next file with comments »

Powered by Google App Engine
This is Rietveld 408576698