Chromium Code Reviews

Unified diff: scripts/slave/recipe_modules/bisect_tester/perf_test.py

Issue 2247373002: Refactor stages 1, 2 and test_api overhaul. (Closed) Base URL: https://chromium.googlesource.com/chromium/tools/build.git@master
Patch Set: Created 4 years, 4 months ago
 # Copyright 2015 The Chromium Authors. All rights reserved.
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.

 import os
 import re
 import time

 from . import parse_metric

(...skipping 61 matching lines...)
   """Attempts to discern whether or not a given command is running telemetry."""
   return 'run_benchmark' in command


 def run_perf_test(api, test_config, **kwargs):
   """Runs the command N times and parses a metric from the output."""
   # TODO(prasadv): Consider extracting out the body of the for loop into
   # a helper method, or extract the metric-extraction to make this more
   # cleaner.
   limit = test_config['max_time_minutes'] * kwargs.get('time_multiplier', 1)
-  run_results = {'measured_values': [], 'errors': set()}
-  values = run_results['measured_values']
+  results = {'valueset_paths': [], 'chartjson_paths': [], 'errors': set(),
+             'retcodes': [], 'values': [], 'output': []}
   metric = test_config.get('metric')
-  retcodes = []
-  output_for_all_runs = []
   temp_dir = None
   repeat_cnt = test_config['repeat_count']

   command = test_config['command']
   use_chartjson = bool('chartjson' in command)
+  use_valueset = bool('valueset' in command)
   is_telemetry = _is_telemetry_command(command)
   start_time = time.time()

   if api.m.chromium.c.TARGET_PLATFORM == 'android' and is_telemetry:
     device_serial_number = api.device_to_test;
     if device_serial_number:
       command += ' --device ' + device_serial_number  # pragma: no cover

   for i in range(repeat_cnt):
     elapsed_minutes = (time.time() - start_time) / 60.0
     # A limit of 0 means 'no timeout set'.
     if limit and elapsed_minutes >= limit:  # pragma: no cover
       break
     if is_telemetry:
       if i == 0 and kwargs.get('reset_on_first_run'):
         command += ' --reset-results'
       if i == repeat_cnt - 1 and kwargs.get('upload_on_last_run'):
         command += ' --upload-results'
       if kwargs.get('results_label'):
         command += ' --results-label=%s' % kwargs.get('results_label')
-    if use_chartjson:  # pragma: no cover
+    if use_chartjson or use_valueset:  # pragma: no cover
       temp_dir = api.m.path.mkdtemp('perf-test-output')
       command = _set_output_dir(command, str(temp_dir))
-      results_path = temp_dir.join('results-chart.json')
+      chartjson_path = temp_dir.join('results-chart.json')
+      valueset_path = temp_dir.join('results-valueset.json')

     step_name = "Performance Test%s %d of %d" % (
         ' (%s)' % kwargs['name'] if 'name' in kwargs else '', i + 1, repeat_cnt)
     if api.m.platform.is_linux:
       os.environ['CHROME_DEVEL_SANDBOX'] = api.m.path.join(
           '/opt', 'chromium', 'chrome_sandbox')
-    out, err, retcode = _run_command(api, command, step_name)
+    out, err, retcode = _run_command(api, command, step_name, **kwargs)
+    results['output'].append(out or '')

     if out is None and err is None:
       # dummy value when running test TODO: replace with a mock
-      values.append(0)
+      results['values'].append(0)
     elif metric:  # pragma: no cover
       if use_chartjson:
-        step_result = api.m.json.read(
-            'Reading chartjson results', results_path)
-        has_valid_value, value = find_values(
-            step_result.json.output, Metric(metric))
-      else:
+        try:
+          step_result = api.m.json.read(
+              'Reading chartjson results', chartjson_path)
+        except api.m.step.StepFailure:
+          pass
+        else:
+          if step_result.json.output:
+            results['chartjson_paths'].append(chartjson_path)
+      if use_valueset:
+        try:
+          step_result = api.m.json.read(
+              'Reading valueset results', valueset_path,
+              step_test_data=lambda: api.m.json.test_api.output(
+                  {'dummy':'dict'}))
+        except api.m.step.StepFailure:
+          pass
+        else:
+          if step_result.json.output:
+            results['valueset_paths'].append(valueset_path)
+      if not use_valueset and not use_chartjson:
         has_valid_value, value = parse_metric.parse_metric(
             out, err, metric.split('/'))
-      output_for_all_runs.append(out)
-      if has_valid_value:
-        values.extend(value)
-      else:
-        # This means the metric was not found in the output.
-        if not retcode:
-          # If all tests passed, but the metric was not found, this means that
-          # something changed on the test, or the given metric name was
-          # incorrect, we need to surface this on the bisector.
-          run_results['errors'].add('MISSING_METRIC')
-    else:
-      output_for_all_runs.append(out)
-    retcodes.append(retcode)
+        if has_valid_value:
+          results['values'].extend(value)
+        else:
+          # This means the metric was not found in the output.
+          if not retcode:
+            # If all tests passed, but the metric was not found, this means that
+            # something changed on the test, or the given metric name was
+            # incorrect, we need to surface this on the bisector.
+            results['errors'].add('MISSING_METRIC')
+    results['retcodes'].append(retcode)

-  return run_results, output_for_all_runs, retcodes
-
+  return results

 def find_values(results, metric):  # pragma: no cover
   """Tries to extract the given metric from the given results.

   This method tries several different possible chart names depending
   on the given metric.

   Args:
     results: The chartjson dict.
     metric: A Metric instance.
(...skipping 33 matching lines...)
   """
   if (file_path.startswith('src/') or file_path.startswith('./src/')):
     return api.m.path['checkout'].join(
         *file_path.split('src', 1)[1].split('/')[1:])
   elif (file_path.startswith('src\\') or
         file_path.startswith('.\\src\\')):  # pragma: no cover
     return api.m.path['checkout'].join(
         *file_path.split('src', 1)[1].split('\\')[1:])
   return file_path

-def _run_command(api, command, step_name):
+def _run_command(api, command, step_name, **kwargs):
   command_parts = command.split()
   stdout = api.m.raw_io.output()
   stderr = api.m.raw_io.output()

   # TODO(prasadv): Remove this once bisect runs are no longer running
   # against revisions from February 2016 or earlier.
-  kwargs = {}
   if 'android-chrome' in command:  # pragma: no cover
     kwargs['env'] = {'CHROMIUM_OUTPUT_DIR': api.m.chromium.output_dir}

   # By default, we assume that the test to run is an executable binary. In the
   # case of python scripts, runtest.py will guess based on the extension.
   python_mode = False
   if command_parts[0] == 'python':  # pragma: no cover
     # Dashboard prepends the command with 'python' when on windows, however, it
     # is not necessary to pass this along to the runtest.py invocation.
     # TODO(robertocn): Remove this clause when dashboard stops sending python as
(...skipping 18 matching lines...)
     step_result.presentation.logs['Captured Output'] = (
         step_result.stdout or '').splitlines()
   except api.m.step.StepFailure as sf:
     sf.result.presentation.logs['Failure Output'] = (
         sf.result.stdout or '').splitlines()
     if sf.result.stderr:  # pragma: no cover
       sf.result.presentation.logs['stderr'] = (
           sf.result.stderr).splitlines()
     return sf.result.stdout, sf.result.stderr, sf.result.retcode
   return step_result.stdout, step_result.stderr, step_result.retcode
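Note on the interface change: run_perf_test() previously returned a (run_results, output_for_all_runs, retcodes) tuple, with measured values under run_results['measured_values']; with this patch it returns a single results dict, and chartjson/valueset output is recorded as file paths for the caller to read rather than parsed in place. The sketch below is a hypothetical caller-side adjustment, not code from this CL; the call site and the api and test_config objects are assumed from the surrounding recipe, and only the dict keys come from the patch.

  # Hypothetical call site; illustrative only, not part of this patch.
  # Before: run_results, output_for_all_runs, retcodes = run_perf_test(api, cfg)
  #         values = run_results['measured_values']
  # After: one dict carries everything.
  results = run_perf_test(api, test_config)
  values = results['values']            # was run_results['measured_values']
  outputs = results['output']           # was output_for_all_runs
  retcodes = results['retcodes']        # was the separate retcodes list
  errors = results['errors']            # e.g. {'MISSING_METRIC'}
  # New in this patch: paths to the results-chart.json / results-valueset.json
  # files produced by the runs, for the caller to read and parse.
  chartjson_paths = results['chartjson_paths']
  valueset_paths = results['valueset_paths']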
