OLD | NEW |
1 # Copyright 2013 The Chromium Authors. All rights reserved. | 1 # Copyright 2013 The Chromium Authors. All rights reserved. |
2 # Use of this source code is governed by a BSD-style license that can be | 2 # Use of this source code is governed by a BSD-style license that can be |
3 # found in the LICENSE file. | 3 # found in the LICENSE file. |
4 | 4 |
5 """Runs perf tests. | 5 """Runs perf tests. |
6 | 6 |
7 Our buildbot infrastructure requires each slave to run steps serially. | 7 Our buildbot infrastructure requires each slave to run steps serially. |
8 This is sub-optimal for android, where these steps can run independently on | 8 This is sub-optimal for android, where these steps can run independently on |
9 multiple connected devices. | 9 multiple connected devices. |
10 | 10 |
(...skipping 45 matching lines...)
56 import sys | 56 import sys |
57 import tempfile | 57 import tempfile |
58 import threading | 58 import threading |
59 import time | 59 import time |
60 | 60 |
61 from pylib import cmd_helper | 61 from pylib import cmd_helper |
62 from pylib import constants | 62 from pylib import constants |
63 from pylib import forwarder | 63 from pylib import forwarder |
64 from pylib.base import base_test_result | 64 from pylib.base import base_test_result |
65 from pylib.base import base_test_runner | 65 from pylib.base import base_test_runner |
| 66 from pylib.device import battery_utils |
66 from pylib.device import device_errors | 67 from pylib.device import device_errors |
67 | 68 |
68 | 69 |
69 def GetPersistedResult(test_name): | 70 def GetPersistedResult(test_name): |
70 file_name = os.path.join(constants.PERF_OUTPUT_DIR, test_name) | 71 file_name = os.path.join(constants.PERF_OUTPUT_DIR, test_name) |
71 if not os.path.exists(file_name): | 72 if not os.path.exists(file_name): |
72 logging.error('File not found %s', file_name) | 73 logging.error('File not found %s', file_name) |
73 return None | 74 return None |
74 | 75 |
75 with file(file_name, 'r') as f: | 76 with file(file_name, 'r') as f: |
(...skipping 108 matching lines...)
184 tests: a dict mapping test_name to command. | 185 tests: a dict mapping test_name to command. |
185 flaky_tests: a list of flaky test_name. | 186 flaky_tests: a list of flaky test_name. |
186 """ | 187 """ |
187 super(TestRunner, self).__init__(device, None) | 188 super(TestRunner, self).__init__(device, None) |
188 self._options = test_options | 189 self._options = test_options |
189 self._shard_index = shard_index | 190 self._shard_index = shard_index |
190 self._max_shard = max_shard | 191 self._max_shard = max_shard |
191 self._tests = tests | 192 self._tests = tests |
192 self._flaky_tests = flaky_tests | 193 self._flaky_tests = flaky_tests |
193 self._output_dir = None | 194 self._output_dir = None |
| 195 self._device_battery = battery_utils.BatteryUtils(self.device) |
194 | 196 |
195 @staticmethod | 197 @staticmethod |
196 def _IsBetter(result): | 198 def _IsBetter(result): |
197 if result['actual_exit_code'] == 0: | 199 if result['actual_exit_code'] == 0: |
198 return True | 200 return True |
199 pickled = os.path.join(constants.PERF_OUTPUT_DIR, | 201 pickled = os.path.join(constants.PERF_OUTPUT_DIR, |
200 result['name']) | 202 result['name']) |
201 if not os.path.exists(pickled): | 203 if not os.path.exists(pickled): |
202 return True | 204 return True |
203 with file(pickled, 'r') as f: | 205 with file(pickled, 'r') as f: |
(...skipping 20 matching lines...)
224 def _CleanupOutputDirectory(self): | 226 def _CleanupOutputDirectory(self): |
225 if self._output_dir: | 227 if self._output_dir: |
226 shutil.rmtree(self._output_dir, ignore_errors=True) | 228 shutil.rmtree(self._output_dir, ignore_errors=True) |
227 self._output_dir = None | 229 self._output_dir = None |
228 | 230 |
229 def _ReadChartjsonOutput(self): | 231 def _ReadChartjsonOutput(self): |
230 if not self._output_dir: | 232 if not self._output_dir: |
231 return '' | 233 return '' |
232 | 234 |
233 json_output_path = os.path.join(self._output_dir, 'results-chart.json') | 235 json_output_path = os.path.join(self._output_dir, 'results-chart.json') |
234 with open(json_output_path) as f: | 236 try: |
235 return f.read() | 237 with open(json_output_path) as f: |
| 238 return f.read() |
| 239 except IOError: |
| 240 logging.exception('Exception when reading chartjson.') |
| 241 logging.error('This usually means that telemetry did not run, so it could' |
| 242 ' not generate the file. Please check the device running' |
| 243 ' the test.') |
| 244 return '' |
236 | 245 |
237 def _LaunchPerfTest(self, test_name): | 246 def _LaunchPerfTest(self, test_name): |
238 """Runs a perf test. | 247 """Runs a perf test. |
239 | 248 |
240 Args: | 249 Args: |
241 test_name: the name of the test to be executed. | 250 test_name: the name of the test to be executed. |
242 | 251 |
243 Returns: | 252 Returns: |
244 A tuple containing (Output, base_test_result.ResultType) | 253 A tuple containing (Output, base_test_result.ResultType) |
245 """ | 254 """ |
246 if not self._CheckDeviceAffinity(test_name): | 255 if not self._CheckDeviceAffinity(test_name): |
247 return '', base_test_result.ResultType.PASS | 256 return '', base_test_result.ResultType.PASS |
248 | 257 |
249 try: | 258 try: |
250 logging.warning('Unmapping device ports') | 259 logging.warning('Unmapping device ports') |
251 forwarder.Forwarder.UnmapAllDevicePorts(self.device) | 260 forwarder.Forwarder.UnmapAllDevicePorts(self.device) |
252 self.device.old_interface.RestartAdbdOnDevice() | 261 self.device.old_interface.RestartAdbdOnDevice() |
253 except Exception as e: | 262 except Exception as e: |
254 logging.error('Exception when tearing down device %s', e) | 263 logging.error('Exception when tearing down device %s', e) |
255 | 264 |
256 cmd = ('%s --device %s' % | 265 cmd = ('%s --device %s' % |
257 (self._tests['steps'][test_name]['cmd'], | 266 (self._tests['steps'][test_name]['cmd'], |
258 self.device_serial)) | 267 self.device_serial)) |
259 | 268 |
260 if self._options.collect_chartjson_data: | 269 if self._options.collect_chartjson_data: |
261 self._output_dir = tempfile.mkdtemp() | 270 self._output_dir = tempfile.mkdtemp() |
262 cmd = cmd + ' --output-dir=%s' % self._output_dir | 271 cmd = cmd + ' --output-dir=%s' % self._output_dir |
263 | 272 |
| 273 logging.info( |
| 274 'temperature: %s (0.1 C)', |
| 275 str(self._device_battery.GetBatteryInfo().get('temperature'))) |
264 logging.info('%s : %s', test_name, cmd) | 276 logging.info('%s : %s', test_name, cmd) |
265 start_time = datetime.datetime.now() | 277 start_time = datetime.datetime.now() |
266 | 278 |
267 timeout = self._tests['steps'][test_name].get('timeout', 5400) | 279 timeout = self._tests['steps'][test_name].get('timeout', 5400) |
268 if self._options.no_timeout: | 280 if self._options.no_timeout: |
269 timeout = None | 281 timeout = None |
270 logging.info('Timeout for %s test: %s', test_name, timeout) | 282 logging.info('Timeout for %s test: %s', test_name, timeout) |
271 full_cmd = cmd | 283 full_cmd = cmd |
272 if self._options.dry_run: | 284 if self._options.dry_run: |
273 full_cmd = 'echo %s' % cmd | 285 full_cmd = 'echo %s' % cmd |
(...skipping 69 matching lines...)
343 Returns: | 355 Returns: |
344 A tuple of (TestRunResults, retry). | 356 A tuple of (TestRunResults, retry). |
345 """ | 357 """ |
346 _, result_type = self._LaunchPerfTest(test_name) | 358 _, result_type = self._LaunchPerfTest(test_name) |
347 results = base_test_result.TestRunResults() | 359 results = base_test_result.TestRunResults() |
348 results.AddResult(base_test_result.BaseTestResult(test_name, result_type)) | 360 results.AddResult(base_test_result.BaseTestResult(test_name, result_type)) |
349 retry = None | 361 retry = None |
350 if not results.DidRunPass(): | 362 if not results.DidRunPass(): |
351 retry = test_name | 363 retry = test_name |
352 return results, retry | 364 return results, retry |
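
Reviewer note on the two behavioral changes in this patch, with a minimal standalone sketch (not part of the patch itself): _ReadChartjsonOutput now tolerates a missing results-chart.json instead of raising, and _LaunchPerfTest logs the device battery temperature via the new battery_utils dependency before launching the test. The helper names below (read_chartjson, log_battery_temperature) and the local device object are illustrative assumptions; only battery_utils.BatteryUtils(device) and GetBatteryInfo() come from the patch.

  import logging
  import os

  from pylib.device import battery_utils


  def read_chartjson(output_dir):
    # Mirrors the patched _ReadChartjsonOutput: telemetry may not have run at
    # all, so a missing results-chart.json is logged and treated as empty
    # output rather than allowed to raise.
    json_output_path = os.path.join(output_dir, 'results-chart.json')
    try:
      with open(json_output_path) as f:
        return f.read()
    except IOError:
      logging.exception('Exception when reading chartjson.')
      return ''


  def log_battery_temperature(device):
    # Mirrors the patched _LaunchPerfTest: record the battery temperature
    # (reported in tenths of a degree Celsius) just before launching a test,
    # presumably so perf results can be correlated with thermal state.
    battery = battery_utils.BatteryUtils(device)
    logging.info('temperature: %s (0.1 C)',
                 str(battery.GetBatteryInfo().get('temperature')))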