OLD | NEW |
1 # Copyright 2015 The Chromium Authors. All rights reserved. | 1 # Copyright 2015 The Chromium Authors. All rights reserved. |
2 # Use of this source code is governed by a BSD-style license that can be | 2 # Use of this source code is governed by a BSD-style license that can be |
3 # found in the LICENSE file. | 3 # found in the LICENSE file. |
4 | 4 |
5 import os | 5 import os |
6 import re | 6 import re |
7 import time | 7 import time |
8 | 8 |
9 from . import parse_metric | 9 from . import parse_metric |
10 | 10 |
(...skipping 61 matching lines...) |
72 """Attempts to discern whether or not a given command is running telemetry.""" | 72 """Attempts to discern whether or not a given command is running telemetry.""" |
73 return 'run_benchmark' in command | 73 return 'run_benchmark' in command |
74 | 74 |
75 | 75 |
76 def run_perf_test(api, test_config, **kwargs): | 76 def run_perf_test(api, test_config, **kwargs): |
77 """Runs the command N times and parses a metric from the output.""" | 77 """Runs the command N times and parses a metric from the output.""" |
78 # TODO(prasadv): Consider extracting out the body of the for loop into | 78 # TODO(prasadv): Consider extracting out the body of the for loop into |
79 # a helper method, or extract the metric-extraction to make this more | 79 # a helper method, or extract the metric-extraction to make this more |
80 # cleaner. | 80 # cleaner. |
81 limit = test_config['max_time_minutes'] * kwargs.get('time_multiplier', 1) | 81 limit = test_config['max_time_minutes'] * kwargs.get('time_multiplier', 1) |
82 run_results = {'measured_values': [], 'errors': set()} | 82 results = {'valueset_paths': [], 'chartjson_paths': [], 'errors': set(), |
83 values = run_results['measured_values'] | 83 'retcodes': [], 'values': [], 'output': []} |
84 metric = test_config.get('metric') | 84 metric = test_config.get('metric') |
85 retcodes = [] | |
86 output_for_all_runs = [] | |
87 temp_dir = None | 85 temp_dir = None |
88 repeat_cnt = test_config['repeat_count'] | 86 repeat_cnt = test_config['repeat_count'] |
89 | 87 |
90 command = test_config['command'] | 88 command = test_config['command'] |
91 use_chartjson = bool('chartjson' in command) | 89 use_chartjson = bool('chartjson' in command) |
| 90 use_valueset = bool('valueset' in command) |
92 is_telemetry = _is_telemetry_command(command) | 91 is_telemetry = _is_telemetry_command(command) |
93 start_time = time.time() | 92 start_time = time.time() |
94 | 93 |
95 if api.m.chromium.c.TARGET_PLATFORM == 'android' and is_telemetry: | 94 if api.m.chromium.c.TARGET_PLATFORM == 'android' and is_telemetry: |
96 device_serial_number = api.device_to_test; | 95 device_serial_number = api.device_to_test; |
97 if device_serial_number: | 96 if device_serial_number: |
98 command += ' --device ' + device_serial_number # pragma: no cover | 97 command += ' --device ' + device_serial_number # pragma: no cover |
99 | 98 |
100 for i in range(repeat_cnt): | 99 for i in range(repeat_cnt): |
101 elapsed_minutes = (time.time() - start_time) / 60.0 | 100 elapsed_minutes = (time.time() - start_time) / 60.0 |
102 # A limit of 0 means 'no timeout set'. | 101 # A limit of 0 means 'no timeout set'. |
103 if limit and elapsed_minutes >= limit: # pragma: no cover | 102 if limit and elapsed_minutes >= limit: # pragma: no cover |
104 break | 103 break |
105 if is_telemetry: | 104 if is_telemetry: |
106 if i == 0 and kwargs.get('reset_on_first_run'): | 105 if i == 0 and kwargs.get('reset_on_first_run'): |
107 command += ' --reset-results' | 106 command += ' --reset-results' |
108 if i == repeat_cnt - 1 and kwargs.get('upload_on_last_run'): | 107 if i == repeat_cnt - 1 and kwargs.get('upload_on_last_run'): |
109 command += ' --upload-results' | 108 command += ' --upload-results' |
110 if kwargs.get('results_label'): | 109 if kwargs.get('results_label'): |
111 command += ' --results-label=%s' % kwargs.get('results_label') | 110 command += ' --results-label=%s' % kwargs.get('results_label') |
112 if use_chartjson: # pragma: no cover | 111 if use_chartjson or use_valueset: # pragma: no cover |
113 temp_dir = api.m.path.mkdtemp('perf-test-output') | 112 temp_dir = api.m.path.mkdtemp('perf-test-output') |
114 command = _set_output_dir(command, str(temp_dir)) | 113 command = _set_output_dir(command, str(temp_dir)) |
115 results_path = temp_dir.join('results-chart.json') | 114 chartjson_path = temp_dir.join('results-chart.json') |
| 115 valueset_path = temp_dir.join('results-valueset.json') |
116 | 116 |
117 step_name = "Performance Test%s %d of %d" % ( | 117 step_name = "Performance Test%s %d of %d" % ( |
118 ' (%s)' % kwargs['name'] if 'name' in kwargs else '', i + 1, repeat_cnt) | 118 ' (%s)' % kwargs['name'] if 'name' in kwargs else '', i + 1, repeat_cnt) |
119 if api.m.platform.is_linux: | 119 if api.m.platform.is_linux: |
120 os.environ['CHROME_DEVEL_SANDBOX'] = api.m.path.join( | 120 os.environ['CHROME_DEVEL_SANDBOX'] = api.m.path.join( |
121 '/opt', 'chromium', 'chrome_sandbox') | 121 '/opt', 'chromium', 'chrome_sandbox') |
122 out, err, retcode = _run_command(api, command, step_name) | 122 out, err, retcode = _run_command(api, command, step_name, **kwargs) |
| 123 results['output'].append(out or '') |
123 | 124 |
124 if out is None and err is None: | 125 if out is None and err is None: |
125 # Dummy value used when running tests. TODO: replace with a mock. | 126 # Dummy value used when running tests. TODO: replace with a mock. |
126 values.append(0) | 127 results['values'].append(0) |
127 elif metric: # pragma: no cover | 128 elif metric: # pragma: no cover |
128 if use_chartjson: | 129 if use_chartjson: |
129 step_result = api.m.json.read( | 130 try: |
130 'Reading chartjson results', results_path) | 131 step_result = api.m.json.read( |
131 has_valid_value, value = find_values( | 132 'Reading chartjson results', chartjson_path) |
132 step_result.json.output, Metric(metric)) | 133 except api.m.step.StepFailure: |
133 else: | 134 pass |
| 135 else: |
| 136 if step_result.json.output: |
| 137 results['chartjson_paths'].append(chartjson_path) |
| 138 if use_valueset: |
| 139 try: |
| 140 step_result = api.m.json.read( |
| 141 'Reading valueset results', valueset_path, |
| 142 step_test_data=lambda: api.m.json.test_api.output( |
| 143 {'dummy':'dict'})) |
| 144 except api.m.step.StepFailure: |
| 145 pass |
| 146 else: |
| 147 if step_result.json.output: |
| 148 results['valueset_paths'].append(valueset_path) |
| 149 if not use_valueset and not use_chartjson: |
134 has_valid_value, value = parse_metric.parse_metric( | 150 has_valid_value, value = parse_metric.parse_metric( |
135 out, err, metric.split('/')) | 151 out, err, metric.split('/')) |
136 output_for_all_runs.append(out) | 152 if has_valid_value: |
137 if has_valid_value: | 153 results['values'].extend(value) |
138 values.extend(value) | 154 else: |
139 else: | 155 # This means the metric was not found in the output. |
140 # This means the metric was not found in the output. | 156 if not retcode: |
141 if not retcode: | 157 # If all tests passed but the metric was not found, this means that |
142 # If all tests passed but the metric was not found, this means that | 158 # something changed in the test, or the given metric name was |
143 # something changed in the test, or the given metric name was | 159 # incorrect; we need to surface this to the bisector. |
144 # incorrect; we need to surface this to the bisector. | 160 results['errors'].add('MISSING_METRIC') |
145 run_results['errors'].add('MISSING_METRIC') | 161 results['retcodes'].append(retcode) |
146 else: | |
147 output_for_all_runs.append(out) | |
148 retcodes.append(retcode) | |
149 | 162 |
150 return run_results, output_for_all_runs, retcodes | 163 return results |
151 | |
152 | 164 |
153 def find_values(results, metric): # pragma: no cover | 165 def find_values(results, metric): # pragma: no cover |
154 """Tries to extract the given metric from the given results. | 166 """Tries to extract the given metric from the given results. |
155 | 167 |
156 This method tries several different possible chart names depending | 168 This method tries several different possible chart names depending |
157 on the given metric. | 169 on the given metric. |
158 | 170 |
159 Args: | 171 Args: |
160 results: The chartjson dict. | 172 results: The chartjson dict. |
161 metric: A Metric instance. | 173 metric: A Metric instance. |
(...skipping 33 matching lines...) |
195 """ | 207 """ |
196 if (file_path.startswith('src/') or file_path.startswith('./src/')): | 208 if (file_path.startswith('src/') or file_path.startswith('./src/')): |
197 return api.m.path['checkout'].join( | 209 return api.m.path['checkout'].join( |
198 *file_path.split('src', 1)[1].split('/')[1:]) | 210 *file_path.split('src', 1)[1].split('/')[1:]) |
199 elif (file_path.startswith('src\\') or | 211 elif (file_path.startswith('src\\') or |
200 file_path.startswith('.\\src\\')): # pragma: no cover | 212 file_path.startswith('.\\src\\')): # pragma: no cover |
201 return api.m.path['checkout'].join( | 213 return api.m.path['checkout'].join( |
202 *file_path.split('src', 1)[1].split('\\')[1:]) | 214 *file_path.split('src', 1)[1].split('\\')[1:]) |
203 return file_path | 215 return file_path |
204 | 216 |
205 def _run_command(api, command, step_name): | 217 def _run_command(api, command, step_name, **kwargs): |
206 command_parts = command.split() | 218 command_parts = command.split() |
207 stdout = api.m.raw_io.output() | 219 stdout = api.m.raw_io.output() |
208 stderr = api.m.raw_io.output() | 220 stderr = api.m.raw_io.output() |
209 | 221 |
| 222 inner_kwargs = {} |
| 223 if 'step_test_data' in kwargs: |
| 224 inner_kwargs['step_test_data'] = kwargs['step_test_data'] |
210 # TODO(prasadv): Remove this once bisect runs are no longer running | 225 # TODO(prasadv): Remove this once bisect runs are no longer running |
211 # against revisions from February 2016 or earlier. | 226 # against revisions from February 2016 or earlier. |
212 kwargs = {} | |
213 if 'android-chrome' in command: # pragma: no cover | 227 if 'android-chrome' in command: # pragma: no cover |
214 kwargs['env'] = {'CHROMIUM_OUTPUT_DIR': api.m.chromium.output_dir} | 228 inner_kwargs['env'] = {'CHROMIUM_OUTPUT_DIR': api.m.chromium.output_dir} |
215 | 229 |
216 # By default, we assume that the test to run is an executable binary. In the | 230 # By default, we assume that the test to run is an executable binary. In the |
217 # case of python scripts, runtest.py will guess based on the extension. | 231 # case of python scripts, runtest.py will guess based on the extension. |
218 python_mode = False | 232 python_mode = False |
219 if command_parts[0] == 'python': # pragma: no cover | 233 if command_parts[0] == 'python': # pragma: no cover |
220 # Dashboard prepends the command with 'python' when on Windows; however, it | 234 # Dashboard prepends the command with 'python' when on Windows; however, it |
221 # is not necessary to pass this along to the runtest.py invocation. | 235 # is not necessary to pass this along to the runtest.py invocation. |
222 # TODO(robertocn): Remove this clause when dashboard stops sending python as | 236 # TODO(robertocn): Remove this clause when dashboard stops sending python as |
223 # part of the command. | 237 # part of the command. |
224 # https://github.com/catapult-project/catapult/issues/2283 | 238 # https://github.com/catapult-project/catapult/issues/2283 |
225 command_parts = command_parts[1:] | 239 command_parts = command_parts[1:] |
226 python_mode = True | 240 python_mode = True |
227 elif _is_telemetry_command(command): | 241 elif _is_telemetry_command(command): |
228 # run_benchmark is a python script without an extension, hence we force | 242 # run_benchmark is a python script without an extension, hence we force |
229 # python mode. | 243 # python mode. |
230 python_mode = True | 244 python_mode = True |
231 try: | 245 try: |
232 step_result = api.m.chromium.runtest( | 246 step_result = api.m.chromium.runtest( |
233 test=_rebase_path(api, command_parts[0]), | 247 test=_rebase_path(api, command_parts[0]), |
234 args=command_parts[1:], | 248 args=command_parts[1:], |
235 xvfb=True, | 249 xvfb=True, |
236 name=step_name, | 250 name=step_name, |
237 python_mode=python_mode, | 251 python_mode=python_mode, |
238 stdout=stdout, | 252 stdout=stdout, |
239 stderr=stderr, | 253 stderr=stderr, |
240 **kwargs) | 254 **inner_kwargs) |
241 step_result.presentation.logs['Captured Output'] = ( | 255 step_result.presentation.logs['Captured Output'] = ( |
242 step_result.stdout or '').splitlines() | 256 step_result.stdout or '').splitlines() |
243 except api.m.step.StepFailure as sf: | 257 except api.m.step.StepFailure as sf: |
244 sf.result.presentation.logs['Failure Output'] = ( | 258 sf.result.presentation.logs['Failure Output'] = ( |
245 sf.result.stdout or '').splitlines() | 259 sf.result.stdout or '').splitlines() |
246 if sf.result.stderr: # pragma: no cover | 260 if sf.result.stderr: # pragma: no cover |
247 sf.result.presentation.logs['stderr'] = ( | 261 sf.result.presentation.logs['stderr'] = ( |
248 sf.result.stderr).splitlines() | 262 sf.result.stderr).splitlines() |
249 return sf.result.stdout, sf.result.stderr, sf.result.retcode | 263 return sf.result.stdout, sf.result.stderr, sf.result.retcode |
250 return step_result.stdout, step_result.stderr, step_result.retcode | 264 return step_result.stdout, step_result.stderr, step_result.retcode |