Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(34)

Side by Side Diff: build/android/test_package.py

Issue 9185043: Increase Android test robustness. (Closed) Base URL: svn://svn.chromium.org/chrome/trunk/src
Patch Set: Remove 'ALWAYS' Created 8 years, 11 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch | Annotate | Revision Log
« no previous file with comments | « build/android/single_test_runner.py ('k') | build/android/test_package_executable.py » ('j') | no next file with comments »
Toggle Intra-line Diffs ('i') | Expand Comments ('e') | Collapse Comments ('c') | Show/Hide Comments ('s')
OLDNEW
1 # Copyright (c) 2011 The Chromium Authors. All rights reserved. 1 # Copyright (c) 2011 The Chromium Authors. All rights reserved.
2 # Use of this source code is governed by a BSD-style license that can be 2 # Use of this source code is governed by a BSD-style license that can be
3 # found in the LICENSE file. 3 # found in the LICENSE file.
4 4
5 5
6 import logging 6 import logging
7 import re 7 import re
8 import os 8 import os
9 import pexpect 9 import pexpect
10 10
(...skipping 110 matching lines...) Expand 10 before | Expand all | Expand 10 after
121 ret += [current + test_name] 121 ret += [current + test_name]
122 return ret 122 return ret
123 123
124 def _WatchTestOutput(self, p): 124 def _WatchTestOutput(self, p):
125 """Watches the test output. 125 """Watches the test output.
126 Args: 126 Args:
127 p: the process generating output as created by pexpect.spawn. 127 p: the process generating output as created by pexpect.spawn.
128 """ 128 """
129 ok_tests = [] 129 ok_tests = []
130 failed_tests = [] 130 failed_tests = []
131 timed_out = False
131 re_run = re.compile('\[ RUN \] ?(.*)\r\n') 132 re_run = re.compile('\[ RUN \] ?(.*)\r\n')
132 re_fail = re.compile('\[ FAILED \] ?(.*)\r\n') 133 re_fail = re.compile('\[ FAILED \] ?(.*)\r\n')
133 re_ok = re.compile('\[ OK \] ?(.*)\r\n') 134 re_ok = re.compile('\[ OK \] ?(.*)\r\n')
134 (io_stats_before, ready_to_continue) = self._BeginGetIOStats() 135 (io_stats_before, ready_to_continue) = self._BeginGetIOStats()
135 while ready_to_continue: 136 while ready_to_continue:
136 found = p.expect([re_run, pexpect.EOF], timeout=self.timeout) 137 found = p.expect([re_run, pexpect.EOF], timeout=self.timeout)
137 if found == 1: # matched pexpect.EOF 138 if found == 1: # matched pexpect.EOF
138 break 139 break
139 if self.dump_debug_info: 140 if self.dump_debug_info:
140 self.dump_debug_info.TakeScreenshot('_Test_Start_Run_') 141 self.dump_debug_info.TakeScreenshot('_Test_Start_Run_')
141 full_test_name = p.match.group(1) 142 full_test_name = p.match.group(1)
142 found = p.expect([re_ok, re_fail, pexpect.EOF, pexpect.TIMEOUT], 143 found = p.expect([re_ok, re_fail, pexpect.EOF, pexpect.TIMEOUT],
143 timeout=self.timeout) 144 timeout=self.timeout)
144 if found == 0: # re_ok 145 if found == 0: # re_ok
145 ok_tests += [BaseTestResult(full_test_name.replace('\r', ''), 146 ok_tests += [BaseTestResult(full_test_name.replace('\r', ''),
146 p.before)] 147 p.before)]
147 continue 148 continue
148 failed_tests += [BaseTestResult(full_test_name.replace('\r', ''), 149 failed_tests += [BaseTestResult(full_test_name.replace('\r', ''),
149 p.before)] 150 p.before)]
150 if found >= 2: 151 if found >= 2:
151 # The test crashed / bailed out (i.e., didn't print OK or FAIL). 152 # The test crashed / bailed out (i.e., didn't print OK or FAIL).
152 if found == 3: # pexpect.TIMEOUT 153 if found == 3: # pexpect.TIMEOUT
153 logging.error('Test terminated after %d second timeout.', 154 logging.error('Test terminated after %d second timeout.',
154 self.timeout) 155 self.timeout)
156 timed_out = True
155 break 157 break
156 p.close() 158 p.close()
157 if not self.rebaseline and ready_to_continue: 159 if not self.rebaseline and ready_to_continue:
158 ok_tests += self._EndGetIOStats(io_stats_before) 160 ok_tests += self._EndGetIOStats(io_stats_before)
159 ret_code = self._GetGTestReturnCode() 161 ret_code = self._GetGTestReturnCode()
160 if ret_code: 162 if ret_code:
161 failed_tests += [BaseTestResult('gtest exit code: %d' % ret_code, 163 failed_tests += [BaseTestResult('gtest exit code: %d' % ret_code,
162 'pexpect.before: %s' 164 'pexpect.before: %s'
163 '\npexpect.after: %s' 165 '\npexpect.after: %s'
164 % (p.before, 166 % (p.before,
165 p.after))] 167 p.after))]
166 return TestResults.FromOkAndFailed(ok_tests, failed_tests) 168 return TestResults.FromOkAndFailed(ok_tests, failed_tests, timed_out)
OLDNEW
« no previous file with comments | « build/android/single_test_runner.py ('k') | build/android/test_package_executable.py » ('j') | no next file with comments »

Powered by Google App Engine
This is Rietveld 408576698