Chromium Code Reviews

| OLD | NEW |
|---|---|
| 1 # Copyright (c) 2012 The Chromium Authors. All rights reserved. | 1 # Copyright (c) 2012 The Chromium Authors. All rights reserved. |
| 2 # Use of this source code is governed by a BSD-style license that can be | 2 # Use of this source code is governed by a BSD-style license that can be |
| 3 # found in the LICENSE file. | 3 # found in the LICENSE file. |
| 4 | 4 |
| 5 | 5 |
| 6 import logging | 6 import logging |
| 7 import re | 7 import re |
| 8 import os | 8 import os |
| 9 | 9 |
| 10 import constants | 10 import constants |
| (...skipping 106 matching lines...) | |
| 117 def _WatchTestOutput(self, p): | 117 def _WatchTestOutput(self, p): |
| 118 """Watches the test output. | 118 """Watches the test output. |
| 119 Args: | 119 Args: |
| 120 p: the process generating output as created by pexpect.spawn. | 120 p: the process generating output as created by pexpect.spawn. |
| 121 """ | 121 """ |
| 122 ok_tests = [] | 122 ok_tests = [] |
| 123 failed_tests = [] | 123 failed_tests = [] |
| 124 crashed_tests = [] | 124 crashed_tests = [] |
| 125 timed_out = False | 125 timed_out = False |
| 126 overall_fail = False | 126 overall_fail = False |
| | 127 |
| | 128 # Test case statuses. |
| 127 re_run = re.compile('\[ RUN \] ?(.*)\r\n') | 129 re_run = re.compile('\[ RUN \] ?(.*)\r\n') |
| 128 # APK tests rely on the PASSED tag. | 130 re_fail = re.compile('\[ FAILED \] ?(.*)\r\n') |
|
> **craigdh** 2012/12/14 23:10:47: These regexes are nearly identical. pattern = '\[
>
> **frankf** 2012/12/17 20:09:28: There are 3 different patterns, not much to gain.
>
> **craigdh** 2012/12/17 20:24:09: Ok. I had only caught two when I looked before. It
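
Purely to illustrate the consolidation craigdh is hinting at (nothing below is from this CL; `_StatusRe` is a hypothetical helper), the RUN/FAILED/PASSED patterns could share one template, while frankf's point stands that OK and CRASHED differ enough to stay separate:

```python
import re

def _StatusRe(tag):
    # Hypothetical helper: the '[ TAG ] name' shape shared by the
    # RUN / FAILED / PASSED markers in the gtest output.
    return re.compile(r'\[ %s \] ?(.*)\r\n' % tag)

re_run = _StatusRe('RUN')
re_fail = _StatusRe('FAILED')
re_passed = _StatusRe('PASSED')

# OK strips a trailing timing field and CRASHED has no optional space,
# so those two keep their own patterns -- frankf's "3 different patterns".
re_ok = re.compile(r'\[ OK \] ?(.*?) .*\r\n')
re_crash = re.compile(r'\[ CRASHED \](.*)\r\n')
```
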
| | 131 re_ok = re.compile('\[ OK \] ?(.*?) .*\r\n') |
| | 132 |
| | 133 # Test run statuses. |
| 129 re_passed = re.compile('\[ PASSED \] ?(.*)\r\n') | 134 re_passed = re.compile('\[ PASSED \] ?(.*)\r\n') |
| | 135 re_runner_fail = re.compile('\[ RUNNER_FAILED \] ?(.*)\r\n') |
| 130 # Signal handlers are installed before starting tests | 136 # Signal handlers are installed before starting tests |
| 131 # to output the CRASHED marker when a crash happens. | 137 # to output the CRASHED marker when a crash happens. |
| 132 re_crash = re.compile('\[ CRASHED \](.*)\r\n') | 138 re_crash = re.compile('\[ CRASHED \](.*)\r\n') |
| 133 re_fail = re.compile('\[ FAILED \] ?(.*)\r\n') | 139 |
| 134 re_runner_fail = re.compile('\[ RUNNER_FAILED \] ?(.*)\r\n') | |
| 135 re_ok = re.compile('\[ OK \] ?(.*?) .*\r\n') | |
| 136 try: | 140 try: |
| 137 while True: | 141 while True: |
| 138 found = p.expect([re_run, re_passed, re_runner_fail], | 142 found = p.expect([re_run, re_passed, re_runner_fail], |
| 139 timeout=self.timeout) | 143 timeout=self.timeout) |
| 140 if found == 1: # matched PASSED. | 144 if found == 1: # re_passed |
| 141 break | 145 break |
| 142 if found == 2: # RUNNER_FAILED | 146 elif found == 2: # re_runner_fail |
| 143 logging.error('RUNNER_FAILED') | |
| 144 overall_fail = True | 147 overall_fail = True |
| 145 break | 148 break |
| 146 if self.dump_debug_info: | 149 else: # re_run |
| 147 self.dump_debug_info.TakeScreenshot('_Test_Start_Run_') | 150 if self.dump_debug_info: |
| 148 full_test_name = p.match.group(1).replace('\r', '') | 151 self.dump_debug_info.TakeScreenshot('_Test_Start_Run_') |
| 149 found = p.expect([re_ok, re_fail, re_crash], timeout=self.timeout) | 152 |
| 150 if found == 0: # re_ok | 153 full_test_name = p.match.group(1).replace('\r', '') |
| 151 if full_test_name == p.match.group(1).replace('\r', ''): | 154 found = p.expect([re_ok, re_fail, re_crash], timeout=self.timeout) |
| 152 ok_tests += [BaseTestResult(full_test_name, p.before)] | 155 if found == 0: # re_ok |
| 153 continue | 156 if full_test_name == p.match.group(1).replace('\r', ''): |
| 154 if found == 2: # re_crash | 157 ok_tests += [BaseTestResult(full_test_name, p.before)] |
| 155 crashed_tests += [BaseTestResult(full_test_name, p.before)] | 158 elif found == 2: # re_crash |
| 156 overall_fail = True | 159 crashed_tests += [BaseTestResult(full_test_name, p.before)] |
| 157 break | 160 overall_fail = True |
| 158 # The test failed. | 161 break |
| 159 failed_tests += [BaseTestResult(full_test_name, p.before)] | 162 else: # re_fail |
| | 163 failed_tests += [BaseTestResult(full_test_name, p.before)] |
| 160 except pexpect.EOF: | 164 except pexpect.EOF: |
| 161 logging.error('Test terminated - EOF') | 165 logging.error('Test terminated - EOF') |
| 162 raise errors.DeviceUnresponsiveError('Device may be offline') | 166 raise errors.DeviceUnresponsiveError('Device may be offline') |
| 163 except pexpect.TIMEOUT: | 167 except pexpect.TIMEOUT: |
| 164 logging.error('Test terminated after %d second timeout.', | 168 logging.error('Test terminated after %d second timeout.', |
| 165 self.timeout) | 169 self.timeout) |
| 166 timed_out = True | 170 timed_out = True |
| 167 finally: | 171 finally: |
| 168 p.close() | 172 p.close() |
| 169 | 173 |
| 170 ret_code = self._GetGTestReturnCode() | 174 ret_code = self._GetGTestReturnCode() |
| 171 if ret_code: | 175 if ret_code: |
| 172 failed_tests += [BaseTestResult('gtest exit code: %d' % ret_code, | 176 logging.critical( |
| 173 'pexpect.before: %s' | 177 'gtest exit code: %d\npexpect.before: %s\npexpect.after: %s', |
| 174 '\npexpect.after: %s' | 178 ret_code, p.before, p.after) |
| 175 % (p.before, | 179 overall_fail = True |
| 176 p.after))] | 180 |
| 177 # Create TestResults and return | 181 # Create TestResults and return |
| 178 return TestResults.FromRun(ok=ok_tests, failed=failed_tests, | 182 return TestResults.FromRun(ok=ok_tests, failed=failed_tests, |
| 179 crashed=crashed_tests, timed_out=timed_out, | 183 crashed=crashed_tests, timed_out=timed_out, |
| 180 overall_fail=overall_fail) | 184 overall_fail=overall_fail) |
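
For readers unfamiliar with the idiom the new loop is built on: `p.expect([...])` blocks until one pattern in the list matches and returns the index of the pattern that fired, leaving the match object on `p.match` and the output consumed before it on `p.before`. A minimal standalone sketch (POSIX-only, and the echoed marker line is made up for illustration; only `pexpect` itself is assumed):

```python
import re
import pexpect  # pexpect.spawn is POSIX-only

# Sketch of the expect-list idiom used by _WatchTestOutput.
re_run = re.compile(r'\[ RUN \] ?(.*)\r\n')

# /bin/echo runs under a pty, so its trailing '\n' arrives as '\r\n'.
# encoding='utf-8' keeps the spawn in str mode to match the str pattern.
p = pexpect.spawn('/bin/echo', ['[ RUN ] FooTest.Bar'], encoding='utf-8')
found = p.expect([re_run, pexpect.EOF], timeout=5)
if found == 0:  # re_run matched first
    print('test started:', p.match.group(1))
p.close()
```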