OLD | NEW |
1 # Copyright 2013 The Chromium Authors. All rights reserved. | 1 # Copyright 2013 The Chromium Authors. All rights reserved. |
2 # Use of this source code is governed by a BSD-style license that can be | 2 # Use of this source code is governed by a BSD-style license that can be |
3 # found in the LICENSE file. | 3 # found in the LICENSE file. |
4 | 4 |
5 """Runs perf tests. | 5 """Runs perf tests. |
6 | 6 |
7 Our buildbot infrastructure requires each slave to run steps serially. | 7 Our buildbot infrastructure requires each slave to run steps serially. |
8 This is sub-optimal for android, where these steps can run independently on | 8 This is sub-optimal for android, where these steps can run independently on |
9 multiple connected devices. | 9 multiple connected devices. |
10 | 10 |
(...skipping 29 matching lines...) Expand all Loading... |
40 "step_name_foo", | 40 "step_name_foo", |
41 "step_name_bar" | 41 "step_name_bar" |
42 ] | 42 ] |
43 | 43 |
44 Note that script_to_execute necessarily has to take at least the following | 44 Note that script_to_execute necessarily has to take at least the following |
45 option: | 45 option: |
46 --device: the serial number to be passed to all adb commands. | 46 --device: the serial number to be passed to all adb commands. |
47 """ | 47 """ |
48 | 48 |
49 import collections | 49 import collections |
50 import datetime | |
51 import json | 50 import json |
52 import logging | 51 import logging |
53 import os | 52 import os |
54 import pickle | 53 import pickle |
55 import shutil | 54 import shutil |
56 import sys | 55 import sys |
57 import tempfile | 56 import tempfile |
58 import threading | 57 import threading |
59 import time | 58 import time |
60 | 59 |
(...skipping 19 matching lines...) Expand all Loading... |
80 def OutputJsonList(json_input, json_output): | 79 def OutputJsonList(json_input, json_output): |
81 with file(json_input, 'r') as i: | 80 with file(json_input, 'r') as i: |
82 all_steps = json.load(i) | 81 all_steps = json.load(i) |
83 | 82 |
84 step_values = [] | 83 step_values = [] |
85 for k, v in all_steps['steps'].iteritems(): | 84 for k, v in all_steps['steps'].iteritems(): |
86 data = {'test': k, 'device_affinity': v['device_affinity']} | 85 data = {'test': k, 'device_affinity': v['device_affinity']} |
87 | 86 |
88 persisted_result = GetPersistedResult(k) | 87 persisted_result = GetPersistedResult(k) |
89 if persisted_result: | 88 if persisted_result: |
| 89 data['start_time'] = persisted_result['start_time'] |
| 90 data['end_time'] = persisted_result['end_time'] |
90 data['total_time'] = persisted_result['total_time'] | 91 data['total_time'] = persisted_result['total_time'] |
91 step_values.append(data) | 92 step_values.append(data) |
92 | 93 |
93 with file(json_output, 'w') as o: | 94 with file(json_output, 'w') as o: |
94 o.write(json.dumps(step_values)) | 95 o.write(json.dumps(step_values)) |
95 return 0 | 96 return 0 |
96 | 97 |
97 | 98 |
98 def PrintTestOutput(test_name, json_file_name=None): | 99 def PrintTestOutput(test_name, json_file_name=None): |
99 """Helper method to print the output of previously executed test_name. | 100 """Helper method to print the output of previously executed test_name. |
(...skipping 175 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
275 self._device_battery.LetBatteryCoolToTemperature( | 276 self._device_battery.LetBatteryCoolToTemperature( |
276 self._options.max_battery_temp) | 277 self._options.max_battery_temp) |
277 | 278 |
278 logging.info('Charge level: %s%%', | 279 logging.info('Charge level: %s%%', |
279 str(self._device_battery.GetBatteryInfo().get('level'))) | 280 str(self._device_battery.GetBatteryInfo().get('level'))) |
280 if self._options.min_battery_level: | 281 if self._options.min_battery_level: |
281 self._device_battery.ChargeDeviceToLevel( | 282 self._device_battery.ChargeDeviceToLevel( |
282 self._options.min_battery_level) | 283 self._options.min_battery_level) |
283 | 284 |
284 logging.info('%s : %s', test_name, cmd) | 285 logging.info('%s : %s', test_name, cmd) |
285 start_time = datetime.datetime.now() | 286 start_time = time.time() |
286 | 287 |
287 timeout = self._tests['steps'][test_name].get('timeout', 5400) | 288 timeout = self._tests['steps'][test_name].get('timeout', 5400) |
288 if self._options.no_timeout: | 289 if self._options.no_timeout: |
289 timeout = None | 290 timeout = None |
290 logging.info('Timeout for %s test: %s', test_name, timeout) | 291 logging.info('Timeout for %s test: %s', test_name, timeout) |
291 full_cmd = cmd | 292 full_cmd = cmd |
292 if self._options.dry_run: | 293 if self._options.dry_run: |
293 full_cmd = 'echo %s' % cmd | 294 full_cmd = 'echo %s' % cmd |
294 | 295 |
295 logfile = sys.stdout | 296 logfile = sys.stdout |
296 if self._options.single_step: | 297 if self._options.single_step: |
297 # Just print a heart-beat so that the outer buildbot scripts won't timeout | 298 # Just print a heart-beat so that the outer buildbot scripts won't timeout |
298 # without response. | 299 # without response. |
299 logfile = _HeartBeatLogger() | 300 logfile = _HeartBeatLogger() |
300 cwd = os.path.abspath(constants.DIR_SOURCE_ROOT) | 301 cwd = os.path.abspath(constants.DIR_SOURCE_ROOT) |
301 if full_cmd.startswith('src/'): | 302 if full_cmd.startswith('src/'): |
302 cwd = os.path.abspath(os.path.join(constants.DIR_SOURCE_ROOT, os.pardir)) | 303 cwd = os.path.abspath(os.path.join(constants.DIR_SOURCE_ROOT, os.pardir)) |
303 try: | 304 try: |
304 exit_code, output = cmd_helper.GetCmdStatusAndOutputWithTimeout( | 305 exit_code, output = cmd_helper.GetCmdStatusAndOutputWithTimeout( |
305 full_cmd, timeout, cwd=cwd, shell=True, logfile=logfile) | 306 full_cmd, timeout, cwd=cwd, shell=True, logfile=logfile) |
306 json_output = self._ReadChartjsonOutput() | 307 json_output = self._ReadChartjsonOutput() |
307 except cmd_helper.TimeoutError as e: | 308 except cmd_helper.TimeoutError as e: |
308 exit_code = -1 | 309 exit_code = -1 |
309 output = str(e) | 310 output = str(e) |
310 json_output = '' | 311 json_output = '' |
311 finally: | 312 finally: |
312 self._CleanupOutputDirectory() | 313 self._CleanupOutputDirectory() |
313 if self._options.single_step: | 314 if self._options.single_step: |
314 logfile.stop() | 315 logfile.stop() |
315 end_time = datetime.datetime.now() | 316 end_time = time.time() |
316 if exit_code is None: | 317 if exit_code is None: |
317 exit_code = -1 | 318 exit_code = -1 |
318 logging.info('%s : exit_code=%d in %d secs at %s', | 319 logging.info('%s : exit_code=%d in %d secs at %s', |
319 test_name, exit_code, (end_time - start_time).seconds, | 320 test_name, exit_code, end_time - start_time, |
320 self.device_serial) | 321 self.device_serial) |
321 | 322 |
322 if exit_code == 0: | 323 if exit_code == 0: |
323 result_type = base_test_result.ResultType.PASS | 324 result_type = base_test_result.ResultType.PASS |
324 else: | 325 else: |
325 result_type = base_test_result.ResultType.FAIL | 326 result_type = base_test_result.ResultType.FAIL |
326 # Since perf tests use device affinity, give the device a chance to | 327 # Since perf tests use device affinity, give the device a chance to |
327 # recover if it is offline after a failure. Otherwise, the master sharder | 328 # recover if it is offline after a failure. Otherwise, the master sharder |
328 # will remove it from the pool and future tests on this device will fail. | 329 # will remove it from the pool and future tests on this device will fail. |
329 try: | 330 try: |
330 self.device.WaitUntilFullyBooted(timeout=120) | 331 self.device.WaitUntilFullyBooted(timeout=120) |
331 except device_errors.CommandTimeoutError as e: | 332 except device_errors.CommandTimeoutError as e: |
332 logging.error('Device failed to return after %s: %s' % (test_name, e)) | 333 logging.error('Device failed to return after %s: %s' % (test_name, e)) |
333 | 334 |
334 actual_exit_code = exit_code | 335 actual_exit_code = exit_code |
335 if test_name in self._flaky_tests: | 336 if test_name in self._flaky_tests: |
336 # The exit_code is used at the second stage when printing the | 337 # The exit_code is used at the second stage when printing the |
337 # test output. If the test is flaky, force to "0" to get that step green | 338 # test output. If the test is flaky, force to "0" to get that step green |
338 # whilst still gathering data to the perf dashboards. | 339 # whilst still gathering data to the perf dashboards. |
339 # The result_type is used by the test_dispatcher to retry the test. | 340 # The result_type is used by the test_dispatcher to retry the test. |
340 exit_code = 0 | 341 exit_code = 0 |
341 | 342 |
342 persisted_result = { | 343 persisted_result = { |
343 'name': test_name, | 344 'name': test_name, |
344 'output': [output], | 345 'output': [output], |
345 'chartjson': json_output, | 346 'chartjson': json_output, |
346 'exit_code': exit_code, | 347 'exit_code': exit_code, |
347 'actual_exit_code': actual_exit_code, | 348 'actual_exit_code': actual_exit_code, |
348 'result_type': result_type, | 349 'result_type': result_type, |
349 'total_time': (end_time - start_time).seconds, | 350 'start_time': start_time, |
| 351 'end_time': end_time, |
| 352 'total_time': end_time - start_time, |
350 'device': self.device_serial, | 353 'device': self.device_serial, |
351 'cmd': cmd, | 354 'cmd': cmd, |
352 } | 355 } |
353 self._SaveResult(persisted_result) | 356 self._SaveResult(persisted_result) |
354 | 357 |
355 return (output, result_type) | 358 return (output, result_type) |
356 | 359 |
357 def RunTest(self, test_name): | 360 def RunTest(self, test_name): |
358 """Run a perf test on the device. | 361 """Run a perf test on the device. |
359 | 362 |
360 Args: | 363 Args: |
361 test_name: String to use for logging the test result. | 364 test_name: String to use for logging the test result. |
362 | 365 |
363 Returns: | 366 Returns: |
364 A tuple of (TestRunResults, retry). | 367 A tuple of (TestRunResults, retry). |
365 """ | 368 """ |
366 _, result_type = self._LaunchPerfTest(test_name) | 369 _, result_type = self._LaunchPerfTest(test_name) |
367 results = base_test_result.TestRunResults() | 370 results = base_test_result.TestRunResults() |
368 results.AddResult(base_test_result.BaseTestResult(test_name, result_type)) | 371 results.AddResult(base_test_result.BaseTestResult(test_name, result_type)) |
369 retry = None | 372 retry = None |
370 if not results.DidRunPass(): | 373 if not results.DidRunPass(): |
371 retry = test_name | 374 retry = test_name |
372 return results, retry | 375 return results, retry |
OLD | NEW |