Chromium Code Reviews| OLD | NEW |
|---|---|
| 1 #!/usr/bin/env python | 1 #!/usr/bin/env python |
| 2 # Copyright 2015 The Chromium Authors. All rights reserved. | 2 # Copyright 2015 The Chromium Authors. All rights reserved. |
| 3 # Use of this source code is governed by a BSD-style license that can be | 3 # Use of this source code is governed by a BSD-style license that can be |
| 4 # found in the LICENSE file. | 4 # found in the LICENSE file. |
| 5 | 5 |
| 6 """Runner for Mojo application benchmarks.""" | 6 """Runner for Mojo application benchmarks.""" |
| 7 | 7 |
| 8 import argparse | 8 import argparse |
| 9 import json | |
| 10 import logging | 9 import logging |
| 11 import os.path | 10 import os.path |
| 12 import re | 11 import re |
| 13 import sys | 12 import sys |
| 14 import time | 13 import time |
| 15 | 14 |
| 16 from devtoolslib import shell_arguments | 15 from devtoolslib import shell_arguments |
| 17 from devtoolslib import shell_config | 16 from devtoolslib import shell_config |
| 18 from devtoolslib import perf_dashboard | 17 from devtoolslib import perf_dashboard |
| 19 | 18 |
| (...skipping 44 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 64 run. | 63 run. |
| 65 """ | 64 """ |
| 66 | 65 |
| 67 _logger = logging.getLogger() | 66 _logger = logging.getLogger() |
| 68 | 67 |
| 69 _BENCHMARK_APP = 'https://core.mojoapps.io/benchmark.mojo' | 68 _BENCHMARK_APP = 'https://core.mojoapps.io/benchmark.mojo' |
| 70 _CACHE_SERVICE_URL = 'mojo:url_response_disk_cache' | 69 _CACHE_SERVICE_URL = 'mojo:url_response_disk_cache' |
| 71 _NETWORK_SERVICE_URL = 'mojo:network_service' | 70 _NETWORK_SERVICE_URL = 'mojo:network_service' |
| 72 | 71 |
| 73 _COLD_START_SHELL_ARGS = [ | 72 _COLD_START_SHELL_ARGS = [ |
| 74 '--args-for=%s %s' % (_CACHE_SERVICE_URL, '--clear'), | 73 '--args-for=%s %s' % (_CACHE_SERVICE_URL, '--clear'), |
| 75 '--args-for=%s %s' % (_NETWORK_SERVICE_URL, '--clear'), | 74 '--args-for=%s %s' % (_NETWORK_SERVICE_URL, '--clear'), |
| 76 ] | 75 ] |
| 77 | 76 |
| 78 # Additional time in seconds allocated per shell run to accommodate start-up. | 77 # Additional time in seconds allocated per shell run to accommodate start-up. |
| 79 # The shell should terminate before hitting this time out, it is an error if it | 78 # The shell should terminate before hitting this time out, it is an error if it |
| 80 # doesn't. | 79 # doesn't. |
| 81 _EXTRA_TIMEOUT = 20 | 80 _EXTRA_TIMEOUT = 20 |
| 82 | 81 |
| 83 _MEASUREMENT_RESULT_FORMAT = r""" | 82 _MEASUREMENT_RESULT_FORMAT = r""" |
| 84 ^ # Beginning of the line. | 83 ^ # Beginning of the line. |
| 85 measurement: # Hard-coded tag. | 84 measurement: # Hard-coded tag. |
| 86 \s+(\S+) # Match measurement spec. | 85 \s+(\S+) # Match measurement spec. |
| 87 \s+(\S+) # Match measurement result. | 86 \s+(\S+) # Match measurement result. |
| 88 $ # End of the line. | 87 $ # End of the line. |
| 89 """ | 88 """ |
| 90 | 89 |
| 91 _MEASUREMENT_REGEX = re.compile(_MEASUREMENT_RESULT_FORMAT, re.VERBOSE) | 90 _MEASUREMENT_REGEX = re.compile(_MEASUREMENT_RESULT_FORMAT, re.VERBOSE) |
| 92 | 91 |
| 93 | 92 |
| 94 def _generate_benchmark_variants(benchmark_spec): | 93 def _generate_benchmark_variants(benchmark_spec): |
| 95 """Generates benchmark specifications for individual variants of the given | 94 """Generates benchmark specifications for individual variants of the given |
| 96 benchmark: cold start and warm start. | 95 benchmark: cold start and warm start. |
| 97 | 96 |
| 98 Returns: | 97 Returns: |
| 99 A list of benchmark specs corresponding to individual variants of the given | 98 A list of benchmark specs corresponding to individual variants of the given |
| 100 benchmark. | 99 benchmark. |
| 101 """ | 100 """ |
| 102 variants = [] | 101 variants = [] |
| 103 variants.append({ | 102 variants.append({ |
| 104 'variant_name': 'cold start', | 103 'variant_name': 'cold start', |
| 105 'app': benchmark_spec['app'], | 104 'app': benchmark_spec['app'], |
| 106 'duration': benchmark_spec['duration'], | 105 'duration': benchmark_spec['duration'], |
| 107 'measurements': benchmark_spec['measurements'], | 106 'measurements': benchmark_spec['measurements'], |
| 108 'shell-args': benchmark_spec.get('shell-args', | 107 'shell-args': benchmark_spec.get('shell-args', |
| 109 []) + _COLD_START_SHELL_ARGS}) | 108 []) + _COLD_START_SHELL_ARGS}) |
| 110 variants.append({ | 109 variants.append({ |
| 111 'variant_name': 'warm start', | 110 'variant_name': 'warm start', |
| 112 'app': benchmark_spec['app'], | 111 'app': benchmark_spec['app'], |
| 113 'duration': benchmark_spec['duration'], | 112 'duration': benchmark_spec['duration'], |
| 114 'measurements': benchmark_spec['measurements'], | 113 'measurements': benchmark_spec['measurements'], |
| 115 'shell-args': benchmark_spec.get('shell-args', [])}) | 114 'shell-args': benchmark_spec.get('shell-args', [])}) |
| 116 return variants | 115 return variants |
| 117 | 116 |
| 118 | 117 |
| 119 def _run_benchmark(shell, shell_args, name, app, duration_seconds, measurements, | 118 def _run_benchmark(shell, shell_args, app, duration_seconds, measurements, |
| 120 verbose, android, output_file): | 119 verbose, android, output_file): |
| 121 """Runs the given benchmark by running `benchmark.mojo` in mojo shell with | 120 """Runs the given benchmark by running `benchmark.mojo` in mojo shell with |
| 122 appropriate arguments and returns the produced output. | 121 appropriate arguments and returns the produced output. |
| 123 | 122 |
| 124 Returns: | 123 Returns: |
| 125 A tuple of (succeeded, error_msg, output). | 124 A tuple of (succeeded, error_msg, output). |
| 126 """ | 125 """ |
| 127 timeout = duration_seconds + _EXTRA_TIMEOUT | 126 timeout = duration_seconds + _EXTRA_TIMEOUT |
| 128 benchmark_args = [] | 127 benchmark_args = [] |
| 129 benchmark_args.append('--app=' + app) | 128 benchmark_args.append('--app=' + app) |
| (...skipping 26 matching lines...) Expand all Loading... | |
| 156 if return_code: | 155 if return_code: |
| 157 return False, 'return code: ' + str(return_code), output | 156 return False, 'return code: ' + str(return_code), output |
| 158 | 157 |
| 159 # Pull the trace file even if some measurements are missing, as it can be | 158 # Pull the trace file even if some measurements are missing, as it can be |
| 160 # useful in debugging. | 159 # useful in debugging. |
| 161 if device_output_file: | 160 if device_output_file: |
| 162 shell.pull_file(device_output_file, output_file, remove_original=True) | 161 shell.pull_file(device_output_file, output_file, remove_original=True) |
| 163 | 162 |
| 164 return True, None, output | 163 return True, None, output |
| 165 | 164 |
| 165 | |
| 166 def _parse_measurement_results(output): | 166 def _parse_measurement_results(output): |
| 167 """Parses the measurement results present in the benchmark output and returns | 167 """Parses the measurement results present in the benchmark output and returns |
| 168 the dictionary of correctly recognized and parsed results. | 168 the dictionary of correctly recognized and parsed results. |
| 169 """ | 169 """ |
| 170 measurement_results = {} | 170 measurement_results = {} |
| 171 output_lines = [line.strip() for line in output.split('\n')] | 171 output_lines = [line.strip() for line in output.split('\n')] |
| 172 for line in output_lines: | 172 for line in output_lines: |
| 173 match = re.match(_MEASUREMENT_REGEX, line) | 173 match = re.match(_MEASUREMENT_REGEX, line) |
| 174 if match: | 174 if match: |
| 175 measurement_spec = match.group(1) | 175 measurement_spec = match.group(1) |
| (...skipping 47 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 223 benchmark_name.replace(' ', '_'), | 223 benchmark_name.replace(' ', '_'), |
| 224 variant_name.replace(' ', '_'), | 224 variant_name.replace(' ', '_'), |
| 225 time.strftime('%Y%m%d%H%M%S')) | 225 time.strftime('%Y%m%d%H%M%S')) |
| 226 | 226 |
| 227 chart_data_recorder = None | 227 chart_data_recorder = None |
| 228 if script_args.upload: | 228 if script_args.upload: |
| 229 chart_data_recorder = perf_dashboard.ChartDataRecorder( | 229 chart_data_recorder = perf_dashboard.ChartDataRecorder( |
| 230 script_args.test_name) | 230 script_args.test_name) |
| 231 | 231 |
| 232 benchmark_succeeded, benchmark_error, output = _run_benchmark( | 232 benchmark_succeeded, benchmark_error, output = _run_benchmark( |
| 233 shell, shell_args, variant_name, app, duration, measurements, | 233 shell, shell_args, app, duration, measurements, script_args.verbose, |
|
qsr
2015/10/28 12:17:30
This doesn't look like style.
ppi
2015/10/28 13:09:15
fixed the commit description.
| |
| 234 script_args.verbose, script_args.android, output_file) | 234 script_args.android, output_file) |
| 235 | 235 |
| 236 print '[ %s ] %s ' % (benchmark_name, variant_name) | 236 print '[ %s ] %s ' % (benchmark_name, variant_name) |
| 237 | 237 |
| 238 some_measurements_failed = False | 238 some_measurements_failed = False |
| 239 if benchmark_succeeded: | 239 if benchmark_succeeded: |
| 240 measurement_results = _parse_measurement_results(output) | 240 measurement_results = _parse_measurement_results(output) |
| 241 # Iterate over the list of specs, not the dictionary, to detect missing | 241 # Iterate over the list of specs, not the dictionary, to detect missing |
| 242 # results and preserve the required order. | 242 # results and preserve the required order. |
| 243 for measurement in measurements: | 243 for measurement in measurements: |
| 244 if measurement['spec'] in measurement_results: | 244 if measurement['spec'] in measurement_results: |
| (...skipping 25 matching lines...) Expand all Loading... | |
| 270 perf_dashboard.upload_chart_data( | 270 perf_dashboard.upload_chart_data( |
| 271 script_args.master_name, script_args.bot_name, | 271 script_args.master_name, script_args.bot_name, |
| 272 script_args.test_name, script_args.builder_name, | 272 script_args.test_name, script_args.builder_name, |
| 273 script_args.build_number, chart_data_recorder.get_chart_data(), | 273 script_args.build_number, chart_data_recorder.get_chart_data(), |
| 274 script_args.server_url, script_args.dry_run) | 274 script_args.server_url, script_args.dry_run) |
| 275 | 275 |
| 276 return exit_code | 276 return exit_code |
| 277 | 277 |
| 278 if __name__ == '__main__': | 278 if __name__ == '__main__': |
| 279 sys.exit(main()) | 279 sys.exit(main()) |
| OLD | NEW |