OLD | NEW |
---|---|
1 #!/usr/bin/env python | 1 #!/usr/bin/env python |
2 # Copyright (c) 2011 The Chromium Authors. All rights reserved. | 2 # Copyright (c) 2011 The Chromium Authors. All rights reserved. |
3 # Use of this source code is governed by a BSD-style license that can be | 3 # Use of this source code is governed by a BSD-style license that can be |
4 # found in the LICENSE file. | 4 # found in the LICENSE file. |
5 | 5 |
6 """Runs all the native unit tests. | 6 """Runs all the native unit tests. |
7 | 7 |
8 1. Copy over test binary to /data/local on device. | 8 1. Copy over test binary to /data/local on device. |
9 2. Resources: chrome/unit_tests requires resources (chrome.pak and en-US.pak) | 9 2. Resources: chrome/unit_tests requires resources (chrome.pak and en-US.pak) |
10 to be deployed to the device (in /data/local/tmp). | 10 to be deployed to the device (in /data/local/tmp). |
(...skipping 114 matching lines...) | |
125 try: | 125 try: |
126 os.kill(self._pid, signal.SIGKILL) | 126 os.kill(self._pid, signal.SIGKILL) |
127 except: | 127 except: |
128 pass | 128 pass |
129 del os.environ['DISPLAY'] | 129 del os.environ['DISPLAY'] |
130 self._pid = 0 | 130 self._pid = 0 |
131 | 131 |
132 | 132 |
133 def RunTests(device, test_suite, gtest_filter, test_arguments, rebaseline, | 133 def RunTests(device, test_suite, gtest_filter, test_arguments, rebaseline, |
134 timeout, performance_test, cleanup_test_files, tool, | 134 timeout, performance_test, cleanup_test_files, tool, |
135 log_dump_name): | 135 log_dump_name, |
136 fast_and_loose=False): | |
Peter Beverloo
2012/01/12 11:19:18
This would fit on line 135.
| |
136 """Runs the tests. | 137 """Runs the tests. |
137 | 138 |
138 Args: | 139 Args: |
139 device: Device to run the tests. | 140 device: Device to run the tests. |
140 test_suite: A specific test suite to run, empty to run all. | 141 test_suite: A specific test suite to run, empty to run all. |
141 gtest_filter: A gtest_filter flag. | 142 gtest_filter: A gtest_filter flag. |
142 test_arguments: Additional arguments to pass to the test binary. | 143 test_arguments: Additional arguments to pass to the test binary. |
143 rebaseline: Whether or not to run tests in isolation and update the filter. | 144 rebaseline: Whether or not to run tests in isolation and update the filter. |
144 timeout: Timeout for each test. | 145 timeout: Timeout for each test. |
145 performance_test: Whether or not performance test(s). | 146 performance_test: Whether or not performance test(s). |
146 cleanup_test_files: Whether or not to cleanup test files on device. | 147 cleanup_test_files: Whether or not to cleanup test files on device. |
147 tool: Name of the Valgrind tool. | 148 tool: Name of the Valgrind tool. |
148 log_dump_name: Name of log dump file. | 149 log_dump_name: Name of log dump file. |
150 fast_and_loose: should we go extra-fast but sacrifice stability | |
151 and/or correctness? Intended for quick cycle testing; not for bots! | |
149 | 152 |
150 Returns: | 153 Returns: |
151 A TestResults object. | 154 A TestResults object. |
152 """ | 155 """ |
153 results = [] | 156 results = [] |
154 | 157 |
155 if test_suite: | 158 if test_suite: |
156 global _TEST_SUITES | 159 global _TEST_SUITES |
157 if not os.path.exists(test_suite): | 160 if not os.path.exists(test_suite): |
158 logging.critical('Unrecognized test suite, supported: %s' % | 161 logging.critical('Unrecognized test suite, supported: %s' % |
159 _TEST_SUITES) | 162 _TEST_SUITES) |
160 if test_suite in _TEST_SUITES: | 163 if test_suite in _TEST_SUITES: |
161 logging.critical('(Remember to include the path: out/Release/%s)', | 164 logging.critical('(Remember to include the path: out/Release/%s)', |
162 test_suite) | 165 test_suite) |
163 return TestResults.FromOkAndFailed([], [BaseTestResult(test_suite, '')]) | 166 return TestResults.FromOkAndFailed([], [BaseTestResult(test_suite, '')]) |
164 _TEST_SUITES = [test_suite] | 167 _TEST_SUITES = [test_suite] |
165 else: | 168 else: |
166 # If not specified, assume the test suites are in out/Release | 169 # If not specified, assume the test suites are in out/Release |
167 test_suite_dir = os.path.abspath(os.path.join(run_tests_helper.CHROME_DIR, | 170 test_suite_dir = os.path.abspath(os.path.join(run_tests_helper.CHROME_DIR, |
168 'out', 'Release')) | 171 'out', 'Release')) |
169 _TEST_SUITES = [os.path.join(test_suite_dir, t) for t in _TEST_SUITES] | 172 _TEST_SUITES = [os.path.join(test_suite_dir, t) for t in _TEST_SUITES] |
170 debug_info_list = [] | 173 debug_info_list = [] |
171 print _TEST_SUITES # So it shows up in buildbot output | 174 print _TEST_SUITES # So it shows up in buildbot output |
172 for t in _TEST_SUITES: | 175 for t in _TEST_SUITES: |
173 test = SingleTestRunner(device, t, gtest_filter, test_arguments, | 176 test = SingleTestRunner(device, t, gtest_filter, test_arguments, |
174 timeout, rebaseline, performance_test, | 177 timeout, rebaseline, performance_test, |
175 cleanup_test_files, tool, not not log_dump_name) | 178 cleanup_test_files, tool, not not log_dump_name, |
179 fast_and_loose=fast_and_loose) | |
176 test.RunTests() | 180 test.RunTests() |
177 results += [test.test_results] | 181 results += [test.test_results] |
178 # Collect debug info. | 182 # Collect debug info. |
179 debug_info_list += [test.dump_debug_info] | 183 debug_info_list += [test.dump_debug_info] |
180 if rebaseline: | 184 if rebaseline: |
181 test.UpdateFilter(test.test_results.failed) | 185 test.UpdateFilter(test.test_results.failed) |
182 elif test.test_results.failed: | 186 elif test.test_results.failed: |
183 # Stop running test if encountering failed test. | 187 # Stop running test if encountering failed test. |
184 test.test_results.LogFull() | 188 test.test_results.LogFull() |
185 break | 189 break |
186 # Zip all debug info outputs into a file named by log_dump_name. | 190 # Zip all debug info outputs into a file named by log_dump_name. |
187 debug_info.GTestDebugInfo.ZipAndCleanResults( | 191 debug_info.GTestDebugInfo.ZipAndCleanResults( |
188 os.path.join(run_tests_helper.CHROME_DIR, 'out', 'Release', | 192 os.path.join(run_tests_helper.CHROME_DIR, 'out', 'Release', |
189 'debug_info_dumps'), | 193 'debug_info_dumps'), |
190 log_dump_name, [d for d in debug_info_list if d]) | 194 log_dump_name, [d for d in debug_info_list if d]) |
191 return TestResults.FromTestResults(results) | 195 return TestResults.FromTestResults(results) |
192 | 196 |
197 | |
193 def Dispatch(options): | 198 def Dispatch(options): |
194 """Dispatches the tests, sharding if possible. | 199 """Dispatches the tests, sharding if possible. |
195 | 200 |
196 If options.use_emulator is True, all tests will be run in a new emulator | 201 If options.use_emulator is True, all tests will be run in a new emulator |
197 instance. | 202 instance. |
198 | 203 |
199 Args: | 204 Args: |
200 options: options for running the tests. | 205 options: options for running the tests. |
201 | 206 |
202 Returns: | 207 Returns: |
203 0 if successful, number of failing tests otherwise. | 208 0 if successful, number of failing tests otherwise. |
204 """ | 209 """ |
205 if options.test_suite == 'help': | 210 if options.test_suite == 'help': |
206 ListTestSuites() | 211 ListTestSuites() |
207 return 0 | 212 return 0 |
208 buildbot_emulator = None | 213 buildbot_emulator = None |
209 attached_devices = [] | 214 attached_devices = [] |
210 | 215 |
211 if options.use_xvfb: | 216 if options.use_xvfb: |
212 xvfb = Xvfb() | 217 xvfb = Xvfb() |
213 xvfb.Start() | 218 xvfb.Start() |
214 | 219 |
215 if options.use_emulator: | 220 if options.use_emulator: |
216 t = TimeProfile('Emulator launch') | 221 t = TimeProfile('Emulator launch') |
217 buildbot_emulator = emulator.Emulator() | 222 buildbot_emulator = emulator.Emulator(options.fast_and_loose) |
218 buildbot_emulator.Launch() | 223 buildbot_emulator.Launch() |
219 t.Stop() | 224 t.Stop() |
220 attached_devices.append(buildbot_emulator.device) | 225 attached_devices.append(buildbot_emulator.device) |
221 else: | 226 else: |
222 attached_devices = android_commands.GetAttachedDevices() | 227 attached_devices = android_commands.GetAttachedDevices() |
223 | 228 |
224 if not attached_devices: | 229 if not attached_devices: |
225 logging.critical('A device must be attached and online.') | 230 logging.critical('A device must be attached and online.') |
226 return 1 | 231 return 1 |
227 | 232 |
228 test_results = RunTests(attached_devices[0], options.test_suite, | 233 test_results = RunTests(attached_devices[0], options.test_suite, |
229 options.gtest_filter, options.test_arguments, | 234 options.gtest_filter, options.test_arguments, |
230 options.rebaseline, options.timeout, | 235 options.rebaseline, options.timeout, |
231 options.performance_test, | 236 options.performance_test, |
232 options.cleanup_test_files, options.tool, | 237 options.cleanup_test_files, options.tool, |
233 options.log_dump) | 238 options.log_dump, |
239 fast_and_loose=options.fast_and_loose) | |
234 if buildbot_emulator: | 240 if buildbot_emulator: |
235 buildbot_emulator.Shutdown() | 241 buildbot_emulator.Shutdown() |
236 if options.use_xvfb: | 242 if options.use_xvfb: |
237 xvfb.Stop() | 243 xvfb.Stop() |
238 | 244 |
239 return len(test_results.failed) | 245 # Another chance if we timed out? At this point it is safe(r) to |
246 # run fast and loose since we just uploaded all the test data and | |
247 # binary. | |
248 if test_results.timed_out and options.repeat: | |
249 logging.critical('Timed out; repeating in fast_and_loose mode.') | |
250 options.fast_and_loose = True | |
251 options.repeat = options.repeat - 1 | |
252 logging.critical('Repeats left: ' + str(options.repeat)) | |
253 return Dispatch(options) | |
254 else: | |
255 return len(test_results.failed) | |
240 | 256 |
Peter Beverloo
2012/01/12 11:19:18
nit: newline (since you fixed it elsewhere)
| |
241 def ListTestSuites(): | 257 def ListTestSuites(): |
242 """Display a list of available test suites | 258 """Display a list of available test suites |
243 """ | 259 """ |
244 print 'Available test suites are:' | 260 print 'Available test suites are:' |
245 for test_suite in _TEST_SUITES: | 261 for test_suite in _TEST_SUITES: |
246 print test_suite | 262 print test_suite |
247 | 263 |
248 | 264 |
249 def main(argv): | 265 def main(argv): |
250 option_parser = run_tests_helper.CreateTestRunnerOptionParser(None, | 266 option_parser = run_tests_helper.CreateTestRunnerOptionParser(None, |
251 default_timeout=0) | 267 default_timeout=0) |
252 option_parser.add_option('-s', dest='test_suite', | 268 option_parser.add_option('-s', dest='test_suite', |
253 help='Executable name of the test suite to run ' | 269 help='Executable name of the test suite to run ' |
254 '(use -s help to list them)') | 270 '(use -s help to list them)') |
255 option_parser.add_option('-r', dest='rebaseline', | 271 option_parser.add_option('-r', dest='rebaseline', |
256 help='Rebaseline and update *testsuite_disabled', | 272 help='Rebaseline and update *testsuite_disabled', |
257 action='store_true', | 273 action='store_true', |
258 default=False) | 274 default=False) |
259 option_parser.add_option('-f', dest='gtest_filter', | 275 option_parser.add_option('-f', '--gtest_filter', dest='gtest_filter', |
Peter Beverloo
2012/01/12 11:19:18
This seems unrelated.
John Grabowski
2012/01/12 18:34:17
Well, it makes use of "fast and loose" mode a lot
| |
260 help='gtest filter') | 276 help='gtest filter') |
261 option_parser.add_option('-a', '--test_arguments', dest='test_arguments', | 277 option_parser.add_option('-a', '--test_arguments', dest='test_arguments', |
262 help='Additional arguments to pass to the test') | 278 help='Additional arguments to pass to the test') |
263 option_parser.add_option('-p', dest='performance_test', | 279 option_parser.add_option('-p', dest='performance_test', |
264 help='Indicator of performance test', | 280 help='Indicator of performance test', |
265 action='store_true', | 281 action='store_true', |
266 default=False) | 282 default=False) |
267 option_parser.add_option('-L', dest='log_dump', | 283 option_parser.add_option('-L', dest='log_dump', |
268 help='file name of log dump, which will be put in' | 284 help='file name of log dump, which will be put in' |
269 'subfolder debug_info_dumps under the same directory' | 285 'subfolder debug_info_dumps under the same directory' |
270 'in where the test_suite exists.') | 286 'in where the test_suite exists.') |
271 option_parser.add_option('-e', '--emulator', dest='use_emulator', | 287 option_parser.add_option('-e', '--emulator', dest='use_emulator', |
272 help='Run tests in a new instance of emulator', | 288 help='Run tests in a new instance of emulator', |
273 action='store_true', | 289 action='store_true', |
274 default=False) | 290 default=False) |
275 option_parser.add_option('-x', '--xvfb', dest='use_xvfb', | 291 option_parser.add_option('-x', '--xvfb', dest='use_xvfb', |
276 action='store_true', default=False, | 292 action='store_true', default=False, |
277 help='Use Xvfb around tests (ignored if not Linux)') | 293 help='Use Xvfb around tests (ignored if not Linux)') |
294 option_parser.add_option('--fast', '--fast_and_loose', dest='fast_and_loose', | |
295 action='store_true', default=False, | |
296 help='Go faster (but be less stable), ' | |
297 'for quick testing. Example: when tracking down ' | |
298 'tests that hang to add to the disabled list, ' | |
299 'there is no need to redeploy the test binary ' | |
300 'or data to the device again. ' | |
301 'Don\'t use on bots!') | |
302 option_parser.add_option('--repeat', dest='repeat', type='int', | |
303 default=2, | |
304 help='Repeat count on test timeout') | |
278 options, args = option_parser.parse_args(argv) | 305 options, args = option_parser.parse_args(argv) |
279 if len(args) > 1: | 306 if len(args) > 1: |
280 print 'Unknown argument:', args[1:] | 307 print 'Unknown argument:', args[1:] |
281 option_parser.print_usage() | 308 option_parser.print_usage() |
282 sys.exit(1) | 309 sys.exit(1) |
283 run_tests_helper.SetLogLevel(options.verbose_count) | 310 run_tests_helper.SetLogLevel(options.verbose_count) |
284 return Dispatch(options) | 311 return Dispatch(options) |
285 | 312 |
286 | 313 |
287 if __name__ == '__main__': | 314 if __name__ == '__main__': |
288 sys.exit(main(sys.argv)) | 315 sys.exit(main(sys.argv)) |
OLD | NEW |
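
For reference, a minimal standalone sketch of the timeout-retry pattern this change adds to `Dispatch()`. The `Options`, `TestResults`, and `run_once` names below are illustrative stand-ins, not the actual Chromium helpers; the point is only to show how `--repeat` and `fast_and_loose` interact after a timeout.

```python
import logging


class Options(object):
  """Illustrative stand-in for the parsed command-line options."""
  def __init__(self, repeat=2, fast_and_loose=False):
    self.repeat = repeat
    self.fast_and_loose = fast_and_loose


class TestResults(object):
  """Illustrative stand-in for the real TestResults object."""
  def __init__(self, failed=None, timed_out=False):
    self.failed = failed or []
    self.timed_out = timed_out


def run_once(options):
  # Placeholder for RunTests(); pretend the slow first pass times out
  # and the fast_and_loose retry succeeds.
  return TestResults(timed_out=not options.fast_and_loose)


def dispatch(options):
  results = run_once(options)
  if results.timed_out and options.repeat:
    # The test binary and data were already pushed to the device on the
    # first pass, so the retry can skip redeployment (fast_and_loose).
    logging.critical('Timed out; repeating in fast_and_loose mode.')
    options.fast_and_loose = True
    options.repeat -= 1
    return dispatch(options)
  return len(results.failed)


if __name__ == '__main__':
  print(dispatch(Options()))  # 0: one timeout, then a passing fast retry
```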