OLD | NEW |
1 # Copyright (c) 2011 The Chromium Authors. All rights reserved. | 1 # Copyright (c) 2011 The Chromium Authors. All rights reserved. |
2 # Use of this source code is governed by a BSD-style license that can be | 2 # Use of this source code is governed by a BSD-style license that can be |
3 # found in the LICENSE file. | 3 # found in the LICENSE file. |
4 | 4 |
5 | 5 |
6 import logging | 6 import logging |
7 import re | 7 import re |
8 import os | 8 import os |
9 import pexpect | 9 import pexpect |
10 | 10 |
(...skipping 17 matching lines...) |
28 performance_test: Whether or not this is a performance test. | 28 performance_test: Whether or not this is a performance test. |
29 cleanup_test_files: Whether or not to cleanup test files on device. | 29 cleanup_test_files: Whether or not to cleanup test files on device. |
30 tool: Name of the Valgrind tool. | 30 tool: Name of the Valgrind tool. |
31 dump_debug_info: A debug_info object. | 31 dump_debug_info: A debug_info object. |
32 """ | 32 """ |
33 | 33 |
34 def __init__(self, adb, device, test_suite, timeout, rebaseline, | 34 def __init__(self, adb, device, test_suite, timeout, rebaseline, |
35 performance_test, cleanup_test_files, tool, dump_debug_info): | 35 performance_test, cleanup_test_files, tool, dump_debug_info): |
36 self.adb = adb | 36 self.adb = adb |
37 self.device = device | 37 self.device = device |
| 38 self.test_suite_full = test_suite |
38 self.test_suite = os.path.splitext(test_suite)[0] | 39 self.test_suite = os.path.splitext(test_suite)[0] |
39 self.test_suite_basename = os.path.basename(self.test_suite) | 40 self.test_suite_basename = os.path.basename(self.test_suite) |
40 self.test_suite_dirname = os.path.dirname(self.test_suite) | 41 self.test_suite_dirname = os.path.dirname(self.test_suite) |
41 self.rebaseline = rebaseline | 42 self.rebaseline = rebaseline |
42 self.performance_test = performance_test | 43 self.performance_test = performance_test |
43 self.cleanup_test_files = cleanup_test_files | 44 self.cleanup_test_files = cleanup_test_files |
44 self.tool = CreateTool(tool, self.adb) | 45 self.tool = CreateTool(tool, self.adb) |
45 if timeout == 0: | 46 if timeout == 0: |
46 if self.test_suite_basename == 'page_cycler_tests': | 47 if self.test_suite_basename == 'page_cycler_tests': |
47 timeout = 900 | 48 timeout = 900 |
(...skipping 74 matching lines...) |
122 return ret | 123 return ret |
123 | 124 |
124 def _WatchTestOutput(self, p): | 125 def _WatchTestOutput(self, p): |
125 """Watches the test output. | 126 """Watches the test output. |
126 Args: | 127 Args: |
127 p: the process generating output as created by pexpect.spawn. | 128 p: the process generating output as created by pexpect.spawn. |
128 """ | 129 """ |
129 ok_tests = [] | 130 ok_tests = [] |
130 failed_tests = [] | 131 failed_tests = [] |
131 timed_out = False | 132 timed_out = False |
| 133 overall_fail = False |
132 re_run = re.compile('\[ RUN \] ?(.*)\r\n') | 134 re_run = re.compile('\[ RUN \] ?(.*)\r\n') |
133 re_fail = re.compile('\[ FAILED \] ?(.*)\r\n') | 135 re_fail = re.compile('\[ FAILED \] ?(.*)\r\n') |
| 136 re_runner_fail = re.compile('\[ RUNNER_FAILED \] ?(.*)\r\n') |
134 re_ok = re.compile('\[ OK \] ?(.*)\r\n') | 137 re_ok = re.compile('\[ OK \] ?(.*)\r\n') |
135 (io_stats_before, ready_to_continue) = self._BeginGetIOStats() | 138 (io_stats_before, ready_to_continue) = self._BeginGetIOStats() |
136 while ready_to_continue: | 139 while ready_to_continue: |
137 found = p.expect([re_run, pexpect.EOF], timeout=self.timeout) | 140 found = p.expect([re_run, pexpect.EOF, re_runner_fail], |
| 141 timeout=self.timeout) |
138 if found == 1: # matched pexpect.EOF | 142 if found == 1: # matched pexpect.EOF |
139 break | 143 break |
| 144 if found == 2: # RUNNER_FAILED |
| 145 logging.error('RUNNER_FAILED') |
| 146 overall_fail = True |
| 147 break |
140 if self.dump_debug_info: | 148 if self.dump_debug_info: |
141 self.dump_debug_info.TakeScreenshot('_Test_Start_Run_') | 149 self.dump_debug_info.TakeScreenshot('_Test_Start_Run_') |
142 full_test_name = p.match.group(1) | 150 full_test_name = p.match.group(1) |
143 found = p.expect([re_ok, re_fail, pexpect.EOF, pexpect.TIMEOUT], | 151 found = p.expect([re_ok, re_fail, pexpect.EOF, pexpect.TIMEOUT], |
144 timeout=self.timeout) | 152 timeout=self.timeout) |
145 if found == 0: # re_ok | 153 if found == 0: # re_ok |
146 ok_tests += [BaseTestResult(full_test_name.replace('\r', ''), | 154 ok_tests += [BaseTestResult(full_test_name.replace('\r', ''), |
147 p.before)] | 155 p.before)] |
148 continue | 156 continue |
149 failed_tests += [BaseTestResult(full_test_name.replace('\r', ''), | 157 failed_tests += [BaseTestResult(full_test_name.replace('\r', ''), |
150 p.before)] | 158 p.before)] |
151 if found >= 2: | 159 if found >= 2: |
152 # The test crashed / bailed out (i.e., didn't print OK or FAIL). | 160 # The test crashed / bailed out (i.e., didn't print OK or FAIL). |
153 if found == 3: # pexpect.TIMEOUT | 161 if found == 3: # pexpect.TIMEOUT |
154 logging.error('Test terminated after %d second timeout.', | 162 logging.error('Test terminated after %d second timeout.', |
155 self.timeout) | 163 self.timeout) |
156 timed_out = True | 164 timed_out = True |
157 break | 165 break |
158 p.close() | 166 p.close() |
159 if not self.rebaseline and ready_to_continue: | 167 if not self.rebaseline and ready_to_continue: |
160 ok_tests += self._EndGetIOStats(io_stats_before) | 168 ok_tests += self._EndGetIOStats(io_stats_before) |
161 ret_code = self._GetGTestReturnCode() | 169 ret_code = self._GetGTestReturnCode() |
162 if ret_code: | 170 if ret_code: |
163 failed_tests += [BaseTestResult('gtest exit code: %d' % ret_code, | 171 failed_tests += [BaseTestResult('gtest exit code: %d' % ret_code, |
164 'pexpect.before: %s' | 172 'pexpect.before: %s' |
165 '\npexpect.after: %s' | 173 '\npexpect.after: %s' |
166 % (p.before, | 174 % (p.before, |
167 p.after))] | 175 p.after))] |
168 return TestResults.FromOkAndFailed(ok_tests, failed_tests, timed_out) | 176 return TestResults.FromOkAndFailed(ok_tests, failed_tests, |
| 177 timed_out, overall_fail) |
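The new [ RUNNER_FAILED ] marker is consumed through the same pexpect.expect() pattern list as the existing RUN/OK/FAILED markers; expect() returns the index of whichever pattern matched, which is why found == 2 maps to a runner-level failure above. A minimal, self-contained sketch of that mechanism follows; the echoed sample line is fabricated for illustration and is not real on-device runner output.

import re
import pexpect

# Markers in the same style as the patterns used by _WatchTestOutput above.
re_run = re.compile(r'\[ RUN \] ?(.*)\r\n')
re_runner_fail = re.compile(r'\[ RUNNER_FAILED \] ?(.*)\r\n')

# Fabricated output standing in for the on-device test runner.
child = pexpect.spawn('echo "[ RUNNER_FAILED ] device lost"', encoding='utf-8')
# expect() returns the index of the matched pattern:
# 0 -> re_run, 1 -> pexpect.EOF, 2 -> re_runner_fail.
found = child.expect([re_run, pexpect.EOF, re_runner_fail], timeout=5)
if found == 2:
  print('Runner-level failure: %s' % child.match.group(1))
child.close()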
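The overall_fail flag introduced in this change is threaded into TestResults.FromOkAndFailed, whose updated signature lives outside this file and is not part of the diff. The sketch below shows one plausible shape for that factory after this change, assuming attribute names that mirror the call site (ok, failed, timed_out, overall_fail); the real TestResults class may differ.

class TestResults(object):
  """Sketch of a results container matching the call site above (assumed)."""

  def __init__(self):
    self.ok = []
    self.failed = []
    self.timed_out = False
    self.overall_fail = False

  @staticmethod
  def FromOkAndFailed(ok, failed, timed_out=False, overall_fail=False):
    # Assumed shape: package per-test results plus the two run-level flags.
    ret = TestResults()
    ret.ok = ok
    ret.failed = failed
    ret.timed_out = timed_out
    ret.overall_fail = overall_fail
    return ret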