| OLD | NEW |
| (Empty) |
| 1 # Copyright (c) 2012 The Chromium Authors. All rights reserved. | |
| 2 # Use of this source code is governed by a BSD-style license that can be | |
| 3 # found in the LICENSE file. | |
| 4 | |
| 5 import logging | |
| 6 import os | |
| 7 import re | |
| 8 import tempfile | |
| 9 | |
| 10 from devil.android import device_errors | |
| 11 from devil.android import ports | |
| 12 from devil.android.perf import perf_control | |
| 13 from pylib import pexpect | |
| 14 from pylib.base import base_test_result | |
| 15 from pylib.base import base_test_runner | |
| 16 from pylib.local import local_test_server_spawner | |
| 17 | |
| 18 | |
| 19 # Test case statuses. | |
| 20 RE_RUN = re.compile('\\[ RUN \\] ?(.*)\r\n') | |
| 21 RE_FAIL = re.compile('\\[ FAILED \\] ?(.*?)( \\((\\d+) ms\\))?\r\r\n') | |
| 22 RE_OK = re.compile('\\[ OK \\] ?(.*?)( \\((\\d+) ms\\))?\r\r\n') | |
| 23 | |
| 24 # Test run statuses. | |
| 25 RE_PASSED = re.compile('\\[ PASSED \\] ?(.*)\r\n') | |
| 26 RE_RUNNER_FAIL = re.compile('\\[ RUNNER_FAILED \\] ?(.*)\r\n') | |
| 27 # Signal handlers are installed before starting tests | |
| 28 # to output the CRASHED marker when a crash happens. | |
| 29 RE_CRASH = re.compile('\\[ CRASHED \\](.*)\r\n') | |
| 30 | |
| 31 # Bots that don't output anything for 20 minutes get timed out, so that's our | |
| 32 # hard cap. | |
| 33 _INFRA_STDOUT_TIMEOUT = 20 * 60 | |
| 34 | |
| 35 | |
| 36 def _TestSuiteRequiresMockTestServer(suite_name): | |
| 37 """Returns True if the test suite requires mock test server.""" | |
| 38 tests_require_net_test_server = ['unit_tests', 'net_unittests', | |
| 39 'components_browsertests', | |
| 40 'content_unittests', | |
| 41 'content_browsertests'] | |
| 42 return (suite_name in | |
| 43 tests_require_net_test_server) | |
| 44 | |
| 45 def _TestSuiteRequiresHighPerfMode(suite_name): | |
| 46 """Returns True if the test suite requires high performance mode.""" | |
| 47 return 'perftests' in suite_name | |
| 48 | |
class TestRunner(base_test_runner.BaseTestRunner):
  """Runs one gtest suite on one device and parses its console output."""

  def __init__(self, test_options, device, test_package):
    """Single test suite attached to a single device.

    Args:
      test_options: A GTestOptions object.
      device: Device to run the tests.
      test_package: An instance of TestPackage class.
    """

    super(TestRunner, self).__init__(device, test_options.tool)

    self.test_package = test_package
    self.test_package.tool = self.tool
    self._test_arguments = test_options.test_arguments

    timeout = test_options.timeout
    if timeout == 0:
      timeout = 60
    # On a VM (e.g. chromium buildbots), this timeout is way too small.
    if os.environ.get('BUILDBOT_SLAVENAME'):
      timeout = timeout * 2

    # Scale by the tool's slowdown factor, but never exceed the infra-imposed
    # no-output cap (the bot itself would be killed after that long).
    self._timeout = min(timeout * self.tool.GetTimeoutScale(),
                        _INFRA_STDOUT_TIMEOUT)
    # NOTE: _perf_controller is only created for perf suites; SetUp and
    # TearDown apply the same guard before touching it.
    if _TestSuiteRequiresHighPerfMode(self.test_package.suite_name):
      self._perf_controller = perf_control.PerfControl(self.device)

    if _TestSuiteRequiresMockTestServer(self.test_package.suite_name):
      self._servers = [
          local_test_server_spawner.LocalTestServerSpawner(
              ports.AllocateTestServerPort(), self.device, self.tool)]
    else:
      self._servers = []

    if test_options.app_data_files:
      self._app_data_files = test_options.app_data_files
      if test_options.app_data_file_dir:
        self._app_data_file_dir = test_options.app_data_file_dir
      else:
        # No destination given: stage pulled app files in a fresh temp dir
        # and announce where they went.
        self._app_data_file_dir = tempfile.mkdtemp()
        logging.critical('Saving app files to %s', self._app_data_file_dir)
    else:
      self._app_data_files = None
      self._app_data_file_dir = None

  #override
  def InstallTestPackage(self):
    # Delegates installation to the TestPackage implementation.
    self.test_package.Install(self.device)

  def _ParseTestOutput(self, p):
    """Process the test output.

    Scans the gtest console stream for the [ RUN ] / [ OK ] / [ FAILED ] /
    [ CRASHED ] markers and converts each into a BaseTestResult.

    Args:
      p: An instance of pexpect spawn class.

    Returns:
      A TestRunResults object.
    """
    results = base_test_result.TestRunResults()

    log = ''
    try:
      while True:
        # Reset per-iteration so the except handlers below can tell whether a
        # test was mid-run when the stream died.
        full_test_name = None

        found = p.expect([RE_RUN, RE_PASSED, RE_RUNNER_FAIL],
                         timeout=self._timeout)
        if found == 1:  # RE_PASSED
          break
        elif found == 2:  # RE_RUNNER_FAIL
          break
        else:  # RE_RUN
          full_test_name = p.match.group(1).replace('\r', '')
          found = p.expect([RE_OK, RE_FAIL, RE_CRASH], timeout=self._timeout)
          # Everything between the RUN marker and the status marker is the
          # test's own output.
          log = p.before.replace('\r', '')
          if found == 0:  # RE_OK
            # Only record PASS when the OK line names the test that was
            # started; a mismatch means the stream is out of sync, and the
            # result is dropped (the test ends up UNKNOWN in RunTest).
            if full_test_name == p.match.group(1).replace('\r', ''):
              duration_ms = int(p.match.group(3)) if p.match.group(3) else 0
              results.AddResult(base_test_result.BaseTestResult(
                  full_test_name, base_test_result.ResultType.PASS,
                  duration=duration_ms, log=log))
          elif found == 2:  # RE_CRASH
            results.AddResult(base_test_result.BaseTestResult(
                full_test_name, base_test_result.ResultType.CRASH,
                log=log))
            # A crash ends the run; no further markers are expected.
            break
          else:  # RE_FAIL
            duration_ms = int(p.match.group(3)) if p.match.group(3) else 0
            results.AddResult(base_test_result.BaseTestResult(
                full_test_name, base_test_result.ResultType.FAIL,
                duration=duration_ms, log=log))
    except pexpect.EOF:
      logging.error('Test terminated - EOF')
      # We're here because either the device went offline, or the test harness
      # crashed without outputting the CRASHED marker (crbug.com/175538).
      if not self.device.IsOnline():
        raise device_errors.DeviceUnreachableError(
            'Device %s went offline.' % str(self.device))
      if full_test_name:
        results.AddResult(base_test_result.BaseTestResult(
            full_test_name, base_test_result.ResultType.CRASH,
            log=p.before.replace('\r', '')))
    except pexpect.TIMEOUT:
      logging.error('Test terminated after %d second timeout.',
                    self._timeout)
      if full_test_name:
        results.AddResult(base_test_result.BaseTestResult(
            full_test_name, base_test_result.ResultType.TIMEOUT,
            log=p.before.replace('\r', '')))
    finally:
      p.close()

    # A nonzero exit code with no parse failure is still worth surfacing.
    ret_code = self.test_package.GetGTestReturnCode(self.device)
    if ret_code:
      logging.critical(
          'gtest exit code: %d\npexpect.before: %s\npexpect.after: %s',
          ret_code, p.before, p.after)

    return results

  #override
  def RunTest(self, test):
    """Runs the requested tests on the device.

    Args:
      test: Colon-separated gtest filter string naming the tests to run.
          Falsy values run nothing.

    Returns:
      A (TestRunResults, retry) tuple, where retry is a colon-separated
      filter of the tests that did not pass (or None when test was falsy).
    """
    test_results = base_test_result.TestRunResults()
    if not test:
      return test_results, None

    try:
      self.test_package.ClearApplicationState(self.device)
      self.test_package.CreateCommandLineFileOnDevice(
          self.device, test, self._test_arguments)
      self.test_package.SetPermissions(self.device)
      test_results = self._ParseTestOutput(
          self.test_package.SpawnTestProcess(self.device))
      if self._app_data_files:
        self.test_package.PullAppFiles(self.device, self._app_data_files,
                                       self._app_data_file_dir)
    finally:
      # Reset spawned test servers even if the run raised.
      for s in self._servers:
        s.Reset()
    # Calculate unknown test results.
    # Tests that were requested but never reported on (e.g. the process died
    # early) are marked UNKNOWN so they land in the retry filter below.
    all_tests = set(test.split(':'))
    all_tests_ran = set([t.GetName() for t in test_results.GetAll()])
    unknown_tests = all_tests - all_tests_ran
    test_results.AddResults(
        [base_test_result.BaseTestResult(t, base_test_result.ResultType.UNKNOWN)
         for t in unknown_tests])
    retry = ':'.join([t.GetName() for t in test_results.GetNotPass()])
    return test_results, retry

  #override
  def SetUp(self):
    """Sets up the necessary test environment for the test suite."""
    super(TestRunner, self).SetUp()
    for s in self._servers:
      s.SetUp()
    if _TestSuiteRequiresHighPerfMode(self.test_package.suite_name):
      self._perf_controller.SetHighPerfMode()
    self.tool.SetupEnvironment()

  #override
  def TearDown(self):
    """Cleans up the test environment for the test suite."""
    for s in self._servers:
      s.TearDown()
    if _TestSuiteRequiresHighPerfMode(self.test_package.suite_name):
      self._perf_controller.SetDefaultPerfMode()
    self.test_package.ClearApplicationState(self.device)
    self.tool.CleanUpEnvironment()
    super(TestRunner, self).TearDown()
| OLD | NEW |