#!/usr/bin/env python
# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

"""Runner for Mojo application benchmarks."""

import argparse
import logging
import sys

from devtoolslib import shell_arguments
from devtoolslib import shell_config


_DESCRIPTION = """Runner for Mojo application benchmarks.

|benchmark_list_file| must be a valid Python program that sets a |benchmarks|
global variable, containing entries of the following form:

  {
    'name': '<name of the benchmark>',
    'app': '<url of the app to benchmark>',
    'shell-args': [],
    'duration': <duration in seconds>,

    # List of measurements to make.
    'measurements': [
      '<measurement type>/<event category>/<event name>',
    ]
  }

Available measurement types are:

 - 'time_until' - time until the first occurrence of the targeted event
 - 'avg_duration' - average duration of the targeted event

|benchmark_list_file| may reference the |target_os| global, which will be one
of ['android', 'linux'], indicating the system on which the benchmarks are to
be run.
"""
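
# Example |benchmark_list_file| matching the schema above (a sketch only; the
# app URL and the event category/name below are hypothetical):
#
#   benchmarks = [
#     {
#       'name': 'example app startup',
#       'app': 'https://core.mojoapps.io/example_app.mojo',
#       'shell-args': [],
#       'duration': 10,
#       'measurements': [
#         'time_until/mojo/first_frame_drawn',
#       ],
#     },
#   ]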

_logger = logging.getLogger()

_BENCHMARK_APP = 'https://core.mojoapps.io/benchmark.mojo'

# Additional time in seconds allocated per shell run to accommodate start-up.
# The shell should terminate before hitting this timeout; it is an error if it
# doesn't.
_EXTRA_TIMEOUT = 20


def _run_benchmark(shell, shell_args, name, app, duration_seconds, measurements,
                   verbose):
  """Runs `benchmark.mojo` in the shell with the correct arguments, then parses
  and presents the benchmark results.

  Returns:
    True iff the benchmark ran and reported success, False otherwise.
  """
  timeout = duration_seconds + _EXTRA_TIMEOUT
  benchmark_args = ['--app=' + app, '--duration=' + str(duration_seconds)]
  benchmark_args.extend(measurements)
  # Copy the argument list so that the caller's list is not mutated.
  shell_args = list(shell_args)
  shell_args.append(_BENCHMARK_APP)
  shell_args.append('--args-for=%s %s' % (_BENCHMARK_APP,
                                          ' '.join(benchmark_args)))
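  # After the two appends above, the tail of shell_args looks like the
  # following (values illustrative; the --args-for value is a single
  # argument, wrapped here for readability):
  #   https://core.mojoapps.io/benchmark.mojo
  #   --args-for=https://core.mojoapps.io/benchmark.mojo --app=<app url>
  #       --duration=10 time_until/mojo/first_frame_drawn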
  if verbose:
    print 'shell arguments: ' + str(shell_args)
  print '[' + name + ']'
  return_code, output, did_time_out = shell.run_and_get_output(
      shell_args, timeout=timeout)
  output_lines = [line.strip() for line in output.split('\n')]

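  # The checks below assume that on success benchmark.mojo prints a
  # 'benchmark succeeded' line along with one 'measurement:' line per
  # requested measurement, e.g. (value illustrative):
  #   measurement: time_until/mojo/first_frame_drawn 123.45
  #   benchmark succeeded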
  if return_code or did_time_out or 'benchmark succeeded' not in output_lines:
    print 'timed out' if did_time_out else 'failed'
    if return_code:
      print 'Return code: ' + str(return_code)
    print 'Output: '
    print output
    print '-' * 72
    return False

  # Echo measurement results.
  for line in output_lines:
    if line.startswith('measurement:'):
      print line
  return True


def main():
  parser = argparse.ArgumentParser(
      formatter_class=argparse.RawDescriptionHelpFormatter,
      description=_DESCRIPTION)
  parser.add_argument('benchmark_list_file', type=file,
                      help='a file listing benchmarks to run')

  # Common shell configuration arguments.
  shell_config.add_shell_arguments(parser)
  script_args = parser.parse_args()
  config = shell_config.get_shell_config(script_args)

  try:
    shell, common_shell_args = shell_arguments.get_shell(config, [])
  except shell_arguments.ShellConfigurationException as e:
    print e
    return 1

  target_os = 'android' if script_args.android else 'linux'
  # Execute the benchmark list file with |target_os| in scope; the file is
  # expected to define a |benchmarks| global (see _DESCRIPTION above).
  benchmark_list_params = {'target_os': target_os}
  exec script_args.benchmark_list_file in benchmark_list_params
  benchmark_list = benchmark_list_params['benchmarks']

  succeeded = True
  for benchmark_spec in benchmark_list:
    name = benchmark_spec['name']
    app = benchmark_spec['app']
    duration = benchmark_spec['duration']
    shell_args = benchmark_spec.get('shell-args', []) + common_shell_args
    measurements = benchmark_spec['measurements']
    # Record a failure of any benchmark, but keep running the remaining ones.
    if not _run_benchmark(shell, shell_args, name, app, duration, measurements,
                          script_args.verbose):
      succeeded = False

  return 0 if succeeded else 1
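
# Example invocation (the script and list-file names are illustrative; the
# --android flag is assumed to be added by shell_config.add_shell_arguments()):
#   ./mojo_benchmark benchmarks/benchmark_list.py --android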

if __name__ == '__main__':
  sys.exit(main())