Chromium Code Reviews| OLD | NEW |
|---|---|
| 1 # Copyright 2013 The Chromium Authors. All rights reserved. | 1 # Copyright 2013 The Chromium Authors. All rights reserved. |
| 2 # Use of this source code is governed by a BSD-style license that can be | 2 # Use of this source code is governed by a BSD-style license that can be |
| 3 # found in the LICENSE file. | 3 # found in the LICENSE file. |
| 4 | 4 |
| 5 """Runs perf tests. | 5 """Runs perf tests. |
| 6 | 6 |
| 7 Our buildbot infrastructure requires each slave to run steps serially. | 7 Our buildbot infrastructure requires each slave to run steps serially. |
| 8 This is sub-optimal for android, where these steps can run independently on | 8 This is sub-optimal for android, where these steps can run independently on |
| 9 multiple connected devices. | 9 multiple connected devices. |
| 10 | 10 |
| (...skipping 44 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 55 import sys | 55 import sys |
| 56 import threading | 56 import threading |
| 57 import time | 57 import time |
| 58 | 58 |
| 59 from pylib import cmd_helper | 59 from pylib import cmd_helper |
| 60 from pylib import constants | 60 from pylib import constants |
| 61 from pylib import forwarder | 61 from pylib import forwarder |
| 62 from pylib.base import base_test_result | 62 from pylib.base import base_test_result |
| 63 from pylib.base import base_test_runner | 63 from pylib.base import base_test_runner |
| 64 | 64 |
| 65 sys.path.append(os.path.join( | |
|
jbudorick
2014/10/15 08:06:18
(see comment below first)
this would become
from pylib.device import device_errors
tonyg
2014/10/15 16:47:27
Done.
| |
| 66 constants.DIR_SOURCE_ROOT, 'third_party', 'android_testrunner')) | |
| 67 import errors | |
| 65 | 68 |
def OutputJsonList(json_input, json_output):
  """Writes the list of step names from a steps file as a JSON list.

  Reads |json_input|, which must contain a top-level 'steps' dict mapping
  step names to their configs, and writes the step names to |json_output|
  as a JSON array.

  Args:
    json_input: Path to the JSON file describing the steps.
    json_output: Path the JSON list of step names is written to.

  Returns:
    0, following the script's exit-code convention for success.
  """
  # open() instead of the Python-2-only file() builtin.
  with open(json_input, 'r') as i:
    all_steps = json.load(i)
  # list() so the result is JSON-serializable on Python 3 as well
  # (dict.keys() returns a non-serializable view there).
  step_names = list(all_steps['steps'])
  with open(json_output, 'w') as o:
    json.dump(step_names, o)
  return 0
| 73 | 76 |
| 74 | 77 |
| (...skipping 170 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 245 output = str(e) | 248 output = str(e) |
| 246 finally: | 249 finally: |
| 247 if self._options.single_step: | 250 if self._options.single_step: |
| 248 logfile.stop() | 251 logfile.stop() |
| 249 end_time = datetime.datetime.now() | 252 end_time = datetime.datetime.now() |
| 250 if exit_code is None: | 253 if exit_code is None: |
| 251 exit_code = -1 | 254 exit_code = -1 |
| 252 logging.info('%s : exit_code=%d in %d secs at %s', | 255 logging.info('%s : exit_code=%d in %d secs at %s', |
| 253 test_name, exit_code, (end_time - start_time).seconds, | 256 test_name, exit_code, (end_time - start_time).seconds, |
| 254 self.device_serial) | 257 self.device_serial) |
| 255 result_type = base_test_result.ResultType.FAIL | 258 |
| 256 if exit_code == 0: | 259 if exit_code == 0: |
| 257 result_type = base_test_result.ResultType.PASS | 260 result_type = base_test_result.ResultType.PASS |
| 261 else: | |
| 262 result_type = base_test_result.ResultType.FAIL | |
| 263 # Since perf tests use device affinity, give the device a chance to | |
| 264 # recover if it is offline after a failure. Otherwise, the master sharder | |
| 265 # will remove it from the pool and future tests on this device will fail. | |
| 266 try: | |
| 267 self.device.old_interface.WaitForDevicePm() | |
|
jbudorick
2014/10/15 08:06:18
I'd rather see this use:
self.device.WaitUntilFullyBooted()
tonyg
2014/10/15 16:47:27
Done.
| |
| 268 except errors.WaitForResponseTimedOutError as e: | |
|
jbudorick
2014/10/15 08:06:18
this would become
except device_errors.CommandTimeoutError as e:
tonyg
2014/10/15 16:47:27
Done.
| |
| 269 logging.error('Device failed to return after %s: %s' % (test_name, e)) | |
| 270 | |
| 258 actual_exit_code = exit_code | 271 actual_exit_code = exit_code |
| 259 if test_name in self._flaky_tests: | 272 if test_name in self._flaky_tests: |
| 260 # The exit_code is used at the second stage when printing the | 273 # The exit_code is used at the second stage when printing the |
| 261 # test output. If the test is flaky, force to "0" to get that step green | 274 # test output. If the test is flaky, force to "0" to get that step green |
| 262 # whilst still gathering data to the perf dashboards. | 275 # whilst still gathering data to the perf dashboards. |
| 263 # The result_type is used by the test_dispatcher to retry the test. | 276 # The result_type is used by the test_dispatcher to retry the test. |
| 264 exit_code = 0 | 277 exit_code = 0 |
| 265 | 278 |
| 266 persisted_result = { | 279 persisted_result = { |
| 267 'name': test_name, | 280 'name': test_name, |
| (...skipping 18 matching lines...) Expand all Loading... | |
| 286 Returns: | 299 Returns: |
| 287 A tuple of (TestRunResults, retry). | 300 A tuple of (TestRunResults, retry). |
| 288 """ | 301 """ |
| 289 _, result_type = self._LaunchPerfTest(test_name) | 302 _, result_type = self._LaunchPerfTest(test_name) |
| 290 results = base_test_result.TestRunResults() | 303 results = base_test_result.TestRunResults() |
| 291 results.AddResult(base_test_result.BaseTestResult(test_name, result_type)) | 304 results.AddResult(base_test_result.BaseTestResult(test_name, result_type)) |
| 292 retry = None | 305 retry = None |
| 293 if not results.DidRunPass(): | 306 if not results.DidRunPass(): |
| 294 retry = test_name | 307 retry = test_name |
| 295 return results, retry | 308 return results, retry |
| OLD | NEW |