Chromium Code Reviews| Index: mojo/devtools/common/mojo_benchmark |
| diff --git a/mojo/devtools/common/mojo_benchmark b/mojo/devtools/common/mojo_benchmark |
| index e0ffff0a82925b1de2b96c44d4d6dfa27a57c624..b57d45e206baa61b34fa1aa10f3a36de38a299d0 100755 |
| --- a/mojo/devtools/common/mojo_benchmark |
| +++ b/mojo/devtools/common/mojo_benchmark |
| @@ -50,22 +50,44 @@ _BENCHMARK_APP = 'https://core.mojoapps.io/benchmark.mojo' |
| _CACHE_SERVICE_URL = 'mojo:url_response_disk_cache' |
| _NETWORK_SERVICE_URL = 'mojo:network_service' |
| +_COLD_START_SHELL_ARGS = [ |
| + '--args-for=%s %s' % (_CACHE_SERVICE_URL, '--clear'), |
| + '--args-for=%s %s' % (_NETWORK_SERVICE_URL, '--clear'), |
| +] |
| + |
| # Additional time in seconds allocated per shell run to accommodate start-up. |
| # The shell should terminate before hitting this time out, it is an error if it |
| # doesn't. |
| _EXTRA_TIMEOUT = 20 |
| -def _get_output_file(shell, name, cold_start): |
| - file_name = 'benchmark-%s-%s-%s.trace' % ( |
| - name.replace(' ', '_'), |
| - 'cold_start' if cold_start else 'warm_start', |
| - time.strftime('%Y%m%d%H%M%S')) |
| - return file_name |
| +def _rewrite_benchmark_list(benchmark_list): |
| + """Rewrites the specification of benchmarks to run, yielding two benchmarks |
| + (one for cold and one for warm start) for each benchmark defined in the input |
| + list. |
| + """ |
| + result = list() |
|
viettrungluu
2015/10/14 21:24:16
Probably "= []" would look more normal.
ppi
2015/10/14 21:47:27
Done.
|
| + for benchmark_spec in benchmark_list: |
|
viettrungluu
2015/10/14 21:24:16
I wonder if you shouldn't have the function be: _g… [comment truncated in capture]
ppi
2015/10/14 21:47:27
Sg, done.
|
| + # Cold start. |
| + result.append({ |
| + 'name': benchmark_spec['name'] + ' (cold start)', |
| + 'app': benchmark_spec['app'], |
| + 'duration': benchmark_spec['duration'], |
| + 'measurements': benchmark_spec['measurements'], |
| + 'shell-args': benchmark_spec.get('shell-args', |
| + []) + _COLD_START_SHELL_ARGS}) |
| + # Warm start. |
| + result.append({ |
| + 'name': benchmark_spec['name'] + ' (warm start)', |
| + 'app': benchmark_spec['app'], |
| + 'duration': benchmark_spec['duration'], |
| + 'measurements': benchmark_spec['measurements'], |
| + 'shell-args': benchmark_spec.get('shell-args', [])}) |
| + return result |
| def _run_benchmark(shell, shell_args, name, app, duration_seconds, measurements, |
| - cold_start, verbose, android, save_traces): |
| + verbose, android, save_traces): |
| """Runs `benchmark.mojo` in shell with correct arguments, parses and |
| presents the benchmark results. |
| """ |
| @@ -77,7 +99,8 @@ def _run_benchmark(shell, shell_args, name, app, duration_seconds, measurements, |
| output_file = None |
| device_output_file = None |
| if save_traces: |
| - output_file = _get_output_file(shell, name, cold_start) |
| + output_file = 'benchmark-%s-%s.trace' % (name.replace(' ', '_'), |
| + time.strftime('%Y%m%d%H%M%S')) |
| if android: |
| device_output_file = os.path.join(shell.get_tmp_dir_path(), output_file) |
| benchmark_args.append('--trace-output=' + device_output_file) |
| @@ -93,13 +116,9 @@ def _run_benchmark(shell, shell_args, name, app, duration_seconds, measurements, |
| shell_args.append('--args-for=%s %s' % (_BENCHMARK_APP, |
| ' '.join(benchmark_args))) |
| - if cold_start: |
| - shell_args.append('--args-for=%s %s' % (_CACHE_SERVICE_URL, '--clear')) |
| - shell_args.append('--args-for=%s %s' % (_NETWORK_SERVICE_URL, '--clear')) |
| - |
| if verbose: |
| print 'shell arguments: ' + str(shell_args) |
| - print '[ %s ] %s' % (name, 'cold start' if cold_start else 'warm start') |
| + print '[ %s ]' % name |
| return_code, output, did_time_out = shell.run_and_get_output( |
| shell_args, timeout=timeout) |
| output_lines = [line.strip() for line in output.split('\n')] |
| @@ -146,7 +165,7 @@ def main(): |
| target_os = 'android' if script_args.android else 'linux' |
| benchmark_list_params = {"target_os": target_os} |
| exec script_args.benchmark_list_file in benchmark_list_params |
| - benchmark_list = benchmark_list_params['benchmarks'] |
| + benchmark_list = _rewrite_benchmark_list(benchmark_list_params['benchmarks']) |
| succeeded = True |
| for benchmark_spec in benchmark_list: |
| @@ -156,12 +175,7 @@ def main(): |
| shell_args = benchmark_spec.get('shell-args', []) + common_shell_args |
| measurements = benchmark_spec['measurements'] |
| _run_benchmark(shell, shell_args, name, app, duration, measurements, |
| - cold_start=True, verbose=script_args.verbose, |
| - android=script_args.android, |
| - save_traces=script_args.save_traces) |
| - _run_benchmark(shell, shell_args, name, app, duration, measurements, |
| - cold_start=False, verbose=script_args.verbose, |
| - android=script_args.android, |
| + verbose=script_args.verbose, android=script_args.android, |
|
viettrungluu
2015/10/14 21:24:16
I find it odd that you give these things as keyword… [comment truncated in capture]
ppi
2015/10/14 21:47:27
Done.
|
| save_traces=script_args.save_traces) |
| return 0 if succeeded else 1 |