OLD | NEW |
---|---|
1 # Copyright 2013 The Chromium Authors. All rights reserved. | 1 # Copyright 2013 The Chromium Authors. All rights reserved. |
2 # Use of this source code is governed by a BSD-style license that can be | 2 # Use of this source code is governed by a BSD-style license that can be |
3 # found in the LICENSE file. | 3 # found in the LICENSE file. |
4 | 4 |
5 """Runs perf tests. | 5 """Runs perf tests. |
6 | 6 |
7 Our buildbot infrastructure requires each slave to run steps serially. | 7 Our buildbot infrastructure requires each slave to run steps serially. |
8 This is sub-optimal for Android, where these steps can run independently on | 8 This is sub-optimal for Android, where these steps can run independently on |
9 multiple connected devices. | 9 multiple connected devices. |
10 | 10 |
11 The buildbots will run this script multiple times per cycle: | 11 The buildbots will run this script multiple times per cycle: |
12 - First: all steps listed in --steps will be executed in parallel using all | 12 - First: all steps listed in --steps will be executed in parallel using all |
13 connected devices. Step results will be pickled to disk. Each step has a unique | 13 connected devices. Step results will be pickled to disk. Each step has a unique |
14 name. The result code will be ignored if the step name is listed in | 14 name. The result code will be ignored if the step name is listed in |
15 --flaky-steps. | 15 --flaky-steps. |
16 The buildbot will treat this step as a regular step, and will not process any | 16 The buildbot will treat this step as a regular step, and will not process any |
17 graph data. | 17 graph data. |
18 | 18 |
19 - Then, with --print-step STEP_NAME: at this stage, we'll simply print the file | 19 - Then, with --print-step STEP_NAME: at this stage, we'll simply print the file |
20 with the step results previously saved. The buildbot will then process the graph | 20 with the step results previously saved. The buildbot will then process the graph |
21 data accordingly. | 21 data accordingly. |
22 | 22 |
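A minimal sketch of what the --print-step phase amounts to, assuming it simply replays the result pickled by the first phase; the helper below is illustrative and not part of this change:

import os
import pickle
from pylib import constants  # assumed import path, matching the module's use of constants.PERF_OUTPUT_DIR

def _PrintStep(step_name):
  # Illustrative only: load the result that the first phase saved under
  # constants.PERF_OUTPUT_DIR (see _SaveResult further down) and replay it
  # so the buildbot can process the graph data and the stored exit code.
  with file(os.path.join(constants.PERF_OUTPUT_DIR, step_name)) as f:
    result = pickle.loads(f.read())
  print result['output']
  return result['exit_code']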
23 The JSON steps file contains a dictionary in the format: | |
24 { "version": int, | |
25 "steps": { | |
26 "foo": { | |
27 "device_affinity": int, | |
28 "cmd": "script_to_execute foo" | |
29 }, | |
30 "bar": { | |
31 "device_affinity": int, | |
32 "cmd": "script_to_execute bar" | |
33 } | |
34 } | |
35 } | |
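As an illustration of how the new format ties into sharding, each runner keeps only the steps whose device_affinity falls on its shard; the shard count and file name below are made up, not taken from this change:

import json

# Hypothetical 2-shard setup: shard 0 keeps steps whose affinity is even.
with file('perf_steps.json') as f:  # illustrative file name
  steps = json.load(f)['steps']
shard_0_steps = [name for name, config in steps.iteritems()
                 if config['device_affinity'] % 2 == 0]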
23 | 36 |
24 The JSON steps file contains a dictionary in the format: | 37 # TODO(bulach): remove once it rolls downstream, crbug.com/378862. |
38 The OLD JSON steps file contains a list in the format: | |
25 [ | 39 [ |
26 ["step_name_foo", "script_to_execute foo"], | 40 ["step_name_foo", "script_to_execute foo"], |
27 ["step_name_bar", "script_to_execute bar"] | 41 ["step_name_bar", "script_to_execute bar"] |
28 ] | 42 ] |
29 | 43 |
30 This preserves the order in which the steps are executed. | 44 This preserves the order in which the steps are executed. |
31 | 45 |
32 The JSON flaky steps file contains a list of step names whose results should | 46 The JSON flaky steps file contains a list of step names whose results should |
33 be ignored: | 47 be ignored: |
34 [ | 48 [ |
(...skipping 89 matching lines...) | |
124 if now - self._tick >= _HeartBeatLogger._PRINT_INTERVAL: | 138 if now - self._tick >= _HeartBeatLogger._PRINT_INTERVAL: |
125 self._tick = now | 139 self._tick = now |
126 print '--single-step output length %d' % self._len | 140 print '--single-step output length %d' % self._len |
127 sys.stdout.flush() | 141 sys.stdout.flush() |
128 | 142 |
129 def stop(self): | 143 def stop(self): |
130 self._stopped.set() | 144 self._stopped.set() |
131 | 145 |
132 | 146 |
133 class TestRunner(base_test_runner.BaseTestRunner): | 147 class TestRunner(base_test_runner.BaseTestRunner): |
134 def __init__(self, test_options, device, tests, flaky_tests): | 148 def __init__(self, test_options, device, shard_index, max_shard, tests, |
149 flaky_tests): | |
135 """A TestRunner instance runs a perf test on a single device. | 150 """A TestRunner instance runs a perf test on a single device. |
136 | 151 |
137 Args: | 152 Args: |
138 test_options: A PerfOptions object. | 153 test_options: A PerfOptions object. |
139 device: Device to run the tests. | 154 device: Device to run the tests. |
155 shard_index: the index of this device. | |
156 max_shard: the total number of shards. | |
140 tests: a dict mapping test_name to command. | 157 tests: a dict mapping test_name to command. |
141 flaky_tests: a list of flaky test_name. | 158 flaky_tests: a list of flaky test_name. |
142 """ | 159 """ |
143 super(TestRunner, self).__init__(device, None, 'Release') | 160 super(TestRunner, self).__init__(device, None, 'Release') |
144 self._options = test_options | 161 self._options = test_options |
162 self._shard_index = shard_index | |
163 self._max_shard = max_shard | |
145 self._tests = tests | 164 self._tests = tests |
146 self._flaky_tests = flaky_tests | 165 self._flaky_tests = flaky_tests |
147 | 166 |
148 @staticmethod | 167 @staticmethod |
149 def _IsBetter(result): | 168 def _IsBetter(result): |
150 if result['actual_exit_code'] == 0: | 169 if result['actual_exit_code'] == 0: |
151 return True | 170 return True |
152 pickled = os.path.join(constants.PERF_OUTPUT_DIR, | 171 pickled = os.path.join(constants.PERF_OUTPUT_DIR, |
153 result['name']) | 172 result['name']) |
154 if not os.path.exists(pickled): | 173 if not os.path.exists(pickled): |
155 return True | 174 return True |
156 with file(pickled, 'r') as f: | 175 with file(pickled, 'r') as f: |
157 previous = pickle.loads(f.read()) | 176 previous = pickle.loads(f.read()) |
158 return result['actual_exit_code'] < previous['actual_exit_code'] | 177 return result['actual_exit_code'] < previous['actual_exit_code'] |
159 | 178 |
160 @staticmethod | 179 @staticmethod |
161 def _SaveResult(result): | 180 def _SaveResult(result): |
162 if TestRunner._IsBetter(result): | 181 if TestRunner._IsBetter(result): |
163 with file(os.path.join(constants.PERF_OUTPUT_DIR, | 182 with file(os.path.join(constants.PERF_OUTPUT_DIR, |
164 result['name']), 'w') as f: | 183 result['name']), 'w') as f: |
165 f.write(pickle.dumps(result)) | 184 f.write(pickle.dumps(result)) |
166 | 185 |
186 def _CheckDeviceAffinity(self, test_name): | |
187 """Returns True if test_name has affinity for this shard.""" | |
188 affinity = (self._tests['steps'][test_name]['device_affinity'] % | |
189 self._max_shard) | |
190 if self._shard_index == affinity: | |
191 return True | |
192 logging.info('Skipping %s on %s (affinity is %s, shard is %s)', | |
193 test_name, self.device_serial, affinity, self._shard_index) | |
194 return False | |
195 | |
167 def _LaunchPerfTest(self, test_name): | 196 def _LaunchPerfTest(self, test_name): |
168 """Runs a perf test. | 197 """Runs a perf test. |
169 | 198 |
170 Args: | 199 Args: |
171 test_name: the name of the test to be executed. | 200 test_name: the name of the test to be executed. |
172 | 201 |
173 Returns: | 202 Returns: |
174 A tuple containing (Output, base_test_result.ResultType) | 203 A tuple containing (Output, base_test_result.ResultType) |
175 """ | 204 """ |
205 if not self._CheckDeviceAffinity(test_name): | |
206 return '', base_test_result.ResultType.PASS | |
207 | |
176 try: | 208 try: |
177 logging.warning('Unmapping device ports') | 209 logging.warning('Unmapping device ports') |
178 forwarder.Forwarder.UnmapAllDevicePorts(self.device) | 210 forwarder.Forwarder.UnmapAllDevicePorts(self.device) |
179 self.device.old_interface.RestartAdbdOnDevice() | 211 self.device.old_interface.RestartAdbdOnDevice() |
180 except Exception as e: | 212 except Exception as e: |
181 logging.error('Exception when tearing down device %s', e) | 213 logging.error('Exception when tearing down device %s', e) |
182 | 214 |
183 cmd = ('%s --device %s' % | 215 cmd = ('%s --device %s' % |
184 (self._tests[test_name], self.device.old_interface.GetDevice())) | 216 (self._tests['steps'][test_name]['cmd'], |
217 self.device_serial)) | |
185 logging.info('%s : %s', test_name, cmd) | 218 logging.info('%s : %s', test_name, cmd) |
186 start_time = datetime.datetime.now() | 219 start_time = datetime.datetime.now() |
187 | 220 |
188 timeout = 5400 | 221 timeout = 5400 |
189 if self._options.no_timeout: | 222 if self._options.no_timeout: |
190 timeout = None | 223 timeout = None |
191 full_cmd = cmd | 224 full_cmd = cmd |
192 if self._options.dry_run: | 225 if self._options.dry_run: |
193 full_cmd = 'echo %s' % cmd | 226 full_cmd = 'echo %s' % cmd |
194 | 227 |
(...skipping 10 matching lines...) | |
205 withexitstatus=True, logfile=logfile, timeout=timeout, | 238 withexitstatus=True, logfile=logfile, timeout=timeout, |
206 env=os.environ) | 239 env=os.environ) |
207 if self._options.single_step: | 240 if self._options.single_step: |
208 # Stop the logger. | 241 # Stop the logger. |
209 logfile.stop() | 242 logfile.stop() |
210 end_time = datetime.datetime.now() | 243 end_time = datetime.datetime.now() |
211 if exit_code is None: | 244 if exit_code is None: |
212 exit_code = -1 | 245 exit_code = -1 |
213 logging.info('%s : exit_code=%d in %d secs at %s', | 246 logging.info('%s : exit_code=%d in %d secs at %s', |
214 test_name, exit_code, (end_time - start_time).seconds, | 247 test_name, exit_code, (end_time - start_time).seconds, |
215 self.device.old_interface.GetDevice()) | 248 self.device_serial) |
216 result_type = base_test_result.ResultType.FAIL | 249 result_type = base_test_result.ResultType.FAIL |
217 if exit_code == 0: | 250 if exit_code == 0: |
218 result_type = base_test_result.ResultType.PASS | 251 result_type = base_test_result.ResultType.PASS |
219 actual_exit_code = exit_code | 252 actual_exit_code = exit_code |
220 if test_name in self._flaky_tests: | 253 if test_name in self._flaky_tests: |
221 # The exit_code is used at the second stage when printing the | 254 # The exit_code is used at the second stage when printing the |
222 # test output. If the test is flaky, force it to 0 to get that step green | 255 # test output. If the test is flaky, force it to 0 to get that step green |
223 # whilst still gathering data for the perf dashboards. | 256 # whilst still gathering data for the perf dashboards. |
224 # The result_type is used by the test_dispatcher to retry the test. | 257 # The result_type is used by the test_dispatcher to retry the test. |
225 exit_code = 0 | 258 exit_code = 0 |
226 | 259 |
227 persisted_result = { | 260 persisted_result = { |
228 'name': test_name, | 261 'name': test_name, |
229 'output': output, | 262 'output': output, |
230 'exit_code': exit_code, | 263 'exit_code': exit_code, |
231 'actual_exit_code': actual_exit_code, | 264 'actual_exit_code': actual_exit_code, |
232 'result_type': result_type, | 265 'result_type': result_type, |
233 'total_time': (end_time - start_time).seconds, | 266 'total_time': (end_time - start_time).seconds, |
234 'device': self.device.old_interface.GetDevice(), | 267 'device': self.device_serial, |
235 'cmd': cmd, | 268 'cmd': cmd, |
236 } | 269 } |
237 self._SaveResult(persisted_result) | 270 self._SaveResult(persisted_result) |
238 | 271 |
239 return (output, result_type) | 272 return (output, result_type) |
240 | 273 |
241 def RunTest(self, test_name): | 274 def RunTest(self, test_name): |
242 """Run a perf test on the device. | 275 """Run a perf test on the device. |
243 | 276 |
244 Args: | 277 Args: |
245 test_name: String to use for logging the test result. | 278 test_name: String to use for logging the test result. |
246 | 279 |
247 Returns: | 280 Returns: |
248 A tuple of (TestRunResults, retry). | 281 A tuple of (TestRunResults, retry). |
249 """ | 282 """ |
250 _, result_type = self._LaunchPerfTest(test_name) | 283 _, result_type = self._LaunchPerfTest(test_name) |
251 results = base_test_result.TestRunResults() | 284 results = base_test_result.TestRunResults() |
252 results.AddResult(base_test_result.BaseTestResult(test_name, result_type)) | 285 results.AddResult(base_test_result.BaseTestResult(test_name, result_type)) |
253 retry = None | 286 retry = None |
254 if not results.DidRunPass(): | 287 if not results.DidRunPass(): |
255 retry = test_name | 288 retry = test_name |
256 return results, retry | 289 return results, retry |
OLD | NEW |
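For reference, a sketch of how the new constructor arguments might be wired up by the dispatcher, one runner per attached device; the device list and option plumbing are assumptions, not shown in this change:

# Hypothetical wiring: shard_index is the device's position in the list and
# max_shard is the total number of attached devices.
devices = ['serial_0', 'serial_1']
runners = [TestRunner(options, device, shard_index, len(devices),
                      tests, flaky_tests)
           for shard_index, device in enumerate(devices)]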