| OLD | NEW |
| 1 # Copyright (c) 2012 The Chromium Authors. All rights reserved. | 1 # Copyright (c) 2012 The Chromium Authors. All rights reserved. |
| 2 # Use of this source code is governed by a BSD-style license that can be | 2 # Use of this source code is governed by a BSD-style license that can be |
| 3 # found in the LICENSE file. | 3 # found in the LICENSE file. |
| 4 | 4 |
| 5 | 5 |
| 6 import logging | 6 import logging |
| 7 import re | 7 import re |
| 8 import os | 8 import os |
| 9 | 9 |
| 10 from pylib import constants | 10 from pylib import constants |
| (...skipping 103 matching lines...) | |
| 114 constants.TEST_EXECUTABLE_DIR + '/linux_dumper_unittest_helper') | 114 constants.TEST_EXECUTABLE_DIR + '/linux_dumper_unittest_helper') |
| 115 | 115 |
| 116 def _WatchTestOutput(self, p): | 116 def _WatchTestOutput(self, p): |
| 117 """Watches the test output. | 117 """Watches the test output. |
| 118 Args: | 118 Args: |
| 119 p: the process generating output as created by pexpect.spawn. | 119 p: the process generating output as created by pexpect.spawn. |
| 120 """ | 120 """ |
| 121 ok_tests = [] | 121 ok_tests = [] |
| 122 failed_tests = [] | 122 failed_tests = [] |
| 123 crashed_tests = [] | 123 crashed_tests = [] |
| 124 timed_out = False | 124 timed_out_tests = [] |
| 125 overall_fail = False | 125 overall_fail = False |
| | 126 overall_timed_out = False |
| 126 | 127 |
| 127 # Test case statuses. | 128 # Test case statuses. |
| 128 re_run = re.compile('\[ RUN \] ?(.*)\r\n') | 129 re_run = re.compile('\[ RUN \] ?(.*)\r\n') |
| 129 re_fail = re.compile('\[ FAILED \] ?(.*)\r\n') | 130 re_fail = re.compile('\[ FAILED \] ?(.*)\r\n') |
| 130 re_ok = re.compile('\[ OK \] ?(.*?) .*\r\n') | 131 re_ok = re.compile('\[ OK \] ?(.*?) .*\r\n') |
| 131 | 132 |
| 132 # Test run statuses. | 133 # Test run statuses. |
| 133 re_passed = re.compile('\[ PASSED \] ?(.*)\r\n') | 134 re_passed = re.compile('\[ PASSED \] ?(.*)\r\n') |
| 134 re_runner_fail = re.compile('\[ RUNNER_FAILED \] ?(.*)\r\n') | 135 re_runner_fail = re.compile('\[ RUNNER_FAILED \] ?(.*)\r\n') |
| 135 # Signal handlers are installed before starting tests | 136 # Signal handlers are installed before starting tests |
| 136 # to output the CRASHED marker when a crash happens. | 137 # to output the CRASHED marker when a crash happens. |
| 137 re_crash = re.compile('\[ CRASHED \](.*)\r\n') | 138 re_crash = re.compile('\[ CRASHED \](.*)\r\n') |
| 138 | 139 |
| 139 try: | 140 try: |
| 140 while True: | 141 while True: |
| | 142 full_test_name = None |
| | 143 |
| 141 found = p.expect([re_run, re_passed, re_runner_fail], | 144 found = p.expect([re_run, re_passed, re_runner_fail], |
| 142 timeout=self.timeout) | 145 timeout=self.timeout) |
| 143 if found == 1: # re_passed | 146 if found == 1: # re_passed |
| 144 break | 147 break |
| 145 elif found == 2: # re_runner_fail | 148 elif found == 2: # re_runner_fail |
| 146 overall_fail = True | 149 overall_fail = True |
| 147 break | 150 break |
| 148 else: # re_run | 151 else: # re_run |
| 149 if self.dump_debug_info: | 152 if self.dump_debug_info: |
| 150 self.dump_debug_info.TakeScreenshot('_Test_Start_Run_') | 153 self.dump_debug_info.TakeScreenshot('_Test_Start_Run_') |
| 151 | 154 |
| 152 full_test_name = p.match.group(1).replace('\r', '') | 155 full_test_name = p.match.group(1).replace('\r', '') |
| 153 found = p.expect([re_ok, re_fail, re_crash], timeout=self.timeout) | 156 found = p.expect([re_ok, re_fail, re_crash], timeout=self.timeout) |
| 154 if found == 0: # re_ok | 157 if found == 0: # re_ok |
| 155 if full_test_name == p.match.group(1).replace('\r', ''): | 158 if full_test_name == p.match.group(1).replace('\r', ''): |
| 156 ok_tests += [BaseTestResult(full_test_name, p.before)] | 159 ok_tests += [BaseTestResult(full_test_name, p.before)] |
| 157 elif found == 2: # re_crash | 160 elif found == 2: # re_crash |
| 158 crashed_tests += [BaseTestResult(full_test_name, p.before)] | 161 crashed_tests += [BaseTestResult(full_test_name, p.before)] |
| 159 overall_fail = True | 162 overall_fail = True |
| 160 break | 163 break |
| 161 else: # re_fail | 164 else: # re_fail |
| 162 failed_tests += [BaseTestResult(full_test_name, p.before)] | 165 failed_tests += [BaseTestResult(full_test_name, p.before)] |
| 163 except pexpect.EOF: | 166 except pexpect.EOF: |
| 164 logging.error('Test terminated - EOF') | 167 logging.error('Test terminated - EOF') |
| 165 raise errors.DeviceUnresponsiveError('Device may be offline') | 168 raise errors.DeviceUnresponsiveError('Device may be offline') |
| 166 except pexpect.TIMEOUT: | 169 except pexpect.TIMEOUT: |
| 167 logging.error('Test terminated after %d second timeout.', | 170 logging.error('Test terminated after %d second timeout.', |
| 168 self.timeout) | 171 self.timeout) |
| 169 timed_out = True | 172 overall_timed_out = True |
| | 173 if full_test_name: |
| | 174 timed_out_tests += [BaseTestResult(full_test_name, p.before)] |
| 170 finally: | 175 finally: |
| 171 p.close() | 176 p.close() |
| 172 | 177 |
| 173 ret_code = self._GetGTestReturnCode() | 178 ret_code = self._GetGTestReturnCode() |
| 174 if ret_code: | 179 if ret_code: |
| 175 logging.critical( | 180 logging.critical( |
| 176 'gtest exit code: %d\npexpect.before: %s\npexpect.after: %s', | 181 'gtest exit code: %d\npexpect.before: %s\npexpect.after: %s', |
| 177 ret_code, p.before, p.after) | 182 ret_code, p.before, p.after) |
| 178 overall_fail = True | 183 overall_fail = True |
| 179 | 184 |
| 180 # Create TestResults and return | 185 # Create TestResults and return |
| 181 return TestResults.FromRun(ok=ok_tests, failed=failed_tests, | 186 return TestResults.FromRun(ok=ok_tests, failed=failed_tests, |
| 182 crashed=crashed_tests, timed_out=timed_out, | 187 crashed=crashed_tests, timed_out=timed_out_tests, |
| 183 overall_fail=overall_fail) | 188 overall_fail=overall_fail, |
| | 189 overall_timed_out=overall_timed_out) |
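
For readers less familiar with the pexpect flow this patch modifies, the sketch below shows the same parsing pattern in isolation: an expect-loop over gtest-style markers that, on a timeout, records which test was running (the new `timed_out_tests` list) in addition to flagging the whole run (`overall_timed_out`). It is a minimal illustration, not Chromium's code: `watch_gtest_output` and the `(name, log)` tuples are hypothetical stand-ins for `_WatchTestOutput`, `BaseTestResult`, and `TestResults.FromRun`, the CRASHED/RUNNER_FAILED markers are omitted, and the marker regexes are written to tolerate gtest's variable padding.

```python
import re

import pexpect


def watch_gtest_output(p, timeout=60):
  """Parses gtest-style output from a pexpect child.

  Returns (ok, failed, timed_out_tests, overall_timed_out); each test entry
  is a hypothetical (test_name, log) tuple rather than a BaseTestResult.
  """
  # Marker regexes; padding inside the brackets is matched loosely.
  re_run = re.compile(r'\[\s*RUN\s*\] ?(.*)\r\n')
  re_ok = re.compile(r'\[\s*OK\s*\] ?(.*?) .*\r\n')
  re_fail = re.compile(r'\[\s*FAILED\s*\] ?(.*)\r\n')
  re_passed = re.compile(r'\[\s*PASSED\s*\] ?(.*)\r\n')

  ok_tests, failed_tests, timed_out_tests = [], [], []
  overall_timed_out = False
  full_test_name = None
  try:
    while True:
      full_test_name = None
      # Wait for the next test to start or the whole suite to finish.
      if p.expect([re_run, re_passed], timeout=timeout) == 1:
        break  # [ PASSED ] -- suite finished.
      full_test_name = p.match.group(1).replace('\r', '')
      # Wait for that test's verdict.
      if p.expect([re_ok, re_fail], timeout=timeout) == 0:
        ok_tests.append((full_test_name, p.before))
      else:
        failed_tests.append((full_test_name, p.before))
  except pexpect.TIMEOUT:
    # The point of this patch: remember *which* test was running when the
    # timeout fired, not just that the run timed out overall.
    overall_timed_out = True
    if full_test_name:
      timed_out_tests.append((full_test_name, p.before))
  finally:
    p.close()
  return ok_tests, failed_tests, timed_out_tests, overall_timed_out
```

A caller would spawn the test binary in text mode so the string regexes apply, e.g. `p = pexpect.spawn('out/Debug/foo_unittests', encoding='utf-8')` (the path and the `encoding` keyword are illustrative of current pexpect, not the 2012 setup).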