OLD | NEW |
1 #!/usr/bin/env python | 1 #!/usr/bin/env python |
2 # Copyright (c) 2011 The Chromium Authors. All rights reserved. | 2 # Copyright (c) 2011 The Chromium Authors. All rights reserved. |
3 # Use of this source code is governed by a BSD-style license that can be | 3 # Use of this source code is governed by a BSD-style license that can be |
4 # found in the LICENSE file. | 4 # found in the LICENSE file. |
5 | 5 |
6 """Runs all the native unit tests. | 6 """Runs all the native unit tests. |
7 | 7 |
8 1. Copy over test binary to /data/local on device. | 8 1. Copy over test binary to /data/local on device. |
9 2. Resources: chrome/unit_tests requires resources (chrome.pak and en-US.pak) | 9 2. Resources: chrome/unit_tests requires resources (chrome.pak and en-US.pak) |
10 to be deployed to the device (in /data/local/tmp). | 10 to be deployed to the device (in /data/local/tmp). |
(...skipping 48 matching lines...) |
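Steps 1 and 2 amount to pushing the test binary and its resource .pak files onto the device. As a rough illustration only, the manual equivalent with raw adb might look like the sketch below; the source paths and the direct use of adb are assumptions (the script itself drives the device through android_commands):

    import subprocess

    # Hypothetical manual equivalent of the two deployment steps above.
    # Source paths under out/Release are assumed for illustration.
    subprocess.check_call(['adb', 'push',
                           'out/Release/base_unittests', '/data/local/'])
    for pak in ['chrome.pak', 'en-US.pak']:
      subprocess.check_call(['adb', 'push',
                             'out/Release/' + pak, '/data/local/tmp/'])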
59 import debug_info | 59 import debug_info |
60 import emulator | 60 import emulator |
61 import run_tests_helper | 61 import run_tests_helper |
62 from single_test_runner import SingleTestRunner | 62 from single_test_runner import SingleTestRunner |
63 from test_package_executable import TestPackageExecutable | 63 from test_package_executable import TestPackageExecutable |
64 from test_result import BaseTestResult, TestResults | 64 from test_result import BaseTestResult, TestResults |
65 | 65 |
66 _TEST_SUITES = ['base_unittests', 'sql_unittests', 'ipc_tests', 'net_unittests'] | 66 _TEST_SUITES = ['base_unittests', 'sql_unittests', 'ipc_tests', 'net_unittests'] |
67 | 67 |
68 | 68 |
| 69 def FullyQualifiedTestSuites(): |
| 70 """Return a fully qualified list that represents all known suites.""" |
| 71 # If not specified, assume the test suites are in out/Release |
| 72 test_suite_dir = os.path.abspath(os.path.join(run_tests_helper.CHROME_DIR, |
| 73 'out', 'Release')) |
| 74 return [os.path.join(test_suite_dir, t) for t in _TEST_SUITES] |
| 75 |
| 76 |
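A quick sketch of what the new FullyQualifiedTestSuites() helper evaluates to, with a made-up checkout root standing in for run_tests_helper.CHROME_DIR (the real value comes from the helper module):

    import os

    CHROME_DIR = '/b/build/src'  # placeholder for run_tests_helper.CHROME_DIR
    _TEST_SUITES = ['base_unittests', 'sql_unittests', 'ipc_tests',
                    'net_unittests']

    test_suite_dir = os.path.abspath(os.path.join(CHROME_DIR, 'out', 'Release'))
    print [os.path.join(test_suite_dir, t) for t in _TEST_SUITES]
    # ['/b/build/src/out/Release/base_unittests', ...]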
69 class TimeProfile(object): | 77 class TimeProfile(object): |
70 """Class for simple profiling of action, with logging of cost.""" | 78 """Class for simple profiling of action, with logging of cost.""" |
71 | 79 |
72 def __init__(self, description): | 80 def __init__(self, description): |
73 self._description = description | 81 self._description = description |
74 self.Start() | 82 self.Start() |
75 | 83 |
76 def Start(self): | 84 def Start(self): |
77 self._starttime = time.time() | 85 self._starttime = time.time() |
78 | 86 |
(...skipping 71 matching lines...) |
150 and/or correctness? Intended for quick cycle testing; not for bots! | 158 and/or correctness? Intended for quick cycle testing; not for bots! |
151 | 159 |
152 Returns: | 160 Returns: |
153 A TestResults object. | 161 A TestResults object. |
154 """ | 162 """ |
155 results = [] | 163 results = [] |
156 | 164 |
157 if test_suite: | 165 if test_suite: |
158 global _TEST_SUITES | 166 global _TEST_SUITES |
159 if not os.path.exists(test_suite): | 167 if not os.path.exists(test_suite): |
160 logging.critical('Unrecognized test suite, supported: %s' % | 168 logging.critical('Unrecognized test suite %s, supported: %s' % |
161 _TEST_SUITES) | 169 (test_suite, _TEST_SUITES)) |
162 if test_suite in _TEST_SUITES: | 170 if test_suite in _TEST_SUITES: |
163 logging.critical('(Remember to include the path: out/Release/%s)', | 171 logging.critical('(Remember to include the path: out/Release/%s)', |
164 test_suite) | 172 test_suite) |
165 return TestResults.FromOkAndFailed([], [BaseTestResult(test_suite, '')]) | 173 return TestResults.FromOkAndFailed([], [BaseTestResult(test_suite, '')]) |
166 _TEST_SUITES = [test_suite] | 174 fully_qualified_test_suites = [test_suite] |
167 else: | 175 else: |
168 # If not specified, assume the test suites are in out/Release | 176 fully_qualified_test_suites = FullyQualifiedTestSuites() |
169 test_suite_dir = os.path.abspath(os.path.join(run_tests_helper.CHROME_DIR, | |
170 'out', 'Release')) | |
171 _TEST_SUITES = [os.path.join(test_suite_dir, t) for t in _TEST_SUITES] | |
172 debug_info_list = [] | 177 debug_info_list = [] |
173 print _TEST_SUITES # So it shows up in buildbot output | 178 print 'Known suites: ' + str(_TEST_SUITES) |
174 for t in _TEST_SUITES: | 179 print 'Running these: ' + str(fully_qualified_test_suites) |
| 180 for t in fully_qualified_test_suites: |
175 test = SingleTestRunner(device, t, gtest_filter, test_arguments, | 181 test = SingleTestRunner(device, t, gtest_filter, test_arguments, |
176 timeout, rebaseline, performance_test, | 182 timeout, rebaseline, performance_test, |
177 cleanup_test_files, tool, bool(log_dump_name), | 183 cleanup_test_files, tool, bool(log_dump_name), |
178 fast_and_loose=fast_and_loose) | 184 fast_and_loose=fast_and_loose) |
179 test.RunTests() | 185 test.RunTests() |
180 results += [test.test_results] | 186 results += [test.test_results] |
181 # Collect debug info. | 187 # Collect debug info. |
182 debug_info_list += [test.dump_debug_info] | 188 debug_info_list += [test.dump_debug_info] |
183 if rebaseline: | 189 if rebaseline: |
184 test.UpdateFilter(test.test_results.failed) | 190 test.UpdateFilter(test.test_results.failed) |
185 elif test.test_results.failed: | 191 elif test.test_results.failed: |
186 # Stop running test if encountering failed test. | 192 # Stop running test if encountering failed test. |
187 test.test_results.LogFull() | 193 test.test_results.LogFull() |
188 break | 194 break |
189 # Zip all debug info outputs into a file named by log_dump_name. | 195 # Zip all debug info outputs into a file named by log_dump_name. |
190 debug_info.GTestDebugInfo.ZipAndCleanResults( | 196 debug_info.GTestDebugInfo.ZipAndCleanResults( |
191 os.path.join(run_tests_helper.CHROME_DIR, 'out', 'Release', | 197 os.path.join(run_tests_helper.CHROME_DIR, 'out', 'Release', |
192 'debug_info_dumps'), | 198 'debug_info_dumps'), |
193 log_dump_name, [d for d in debug_info_list if d]) | 199 log_dump_name, [d for d in debug_info_list if d]) |
194 return TestResults.FromTestResults(results) | 200 return TestResults.FromTestResults(results) |
195 | 201 |
| 202 def _RunATestSuite(options): |
| 203 """Run a single test suite. |
196 | 204 |
197 def Dispatch(options): | 205 Helper for Dispatch() to allow stop/restart of the emulator across |
198 """Dispatches the tests, sharding if possible. | 206 test bundles. If using the emulator, we start it on entry and stop |
199 | 207 it on exit. |
200 If options.use_emulator is True, all tests will be run in a new emulator | |
201 instance. | |
202 | 208 |
203 Args: | 209 Args: |
204 options: options for running the tests. | 210 options: options for running the tests. |
205 | 211 |
206 Returns: | 212 Returns: |
207 0 if successful, number of failing tests otherwise. | 213 0 if successful, number of failing tests otherwise. |
208 """ | 214 """ |
209 if options.test_suite == 'help': | |
210 ListTestSuites() | |
211 return 0 | |
212 buildbot_emulator = None | |
213 attached_devices = [] | 215 attached_devices = [] |
214 | |
215 if options.use_xvfb: | |
216 xvfb = Xvfb() | |
217 xvfb.Start() | |
218 | |
219 if options.use_emulator: | 216 if options.use_emulator: |
220 t = TimeProfile('Emulator launch') | 217 t = TimeProfile('Emulator launch') |
221 buildbot_emulator = emulator.Emulator(options.fast_and_loose) | 218 buildbot_emulator = emulator.Emulator(options.fast_and_loose) |
222 buildbot_emulator.Launch() | 219 buildbot_emulator.Launch() |
223 t.Stop() | 220 t.Stop() |
224 attached_devices.append(buildbot_emulator.device) | 221 attached_devices.append(buildbot_emulator.device) |
225 else: | 222 else: |
226 attached_devices = android_commands.GetAttachedDevices() | 223 attached_devices = android_commands.GetAttachedDevices() |
227 | 224 |
228 if not attached_devices: | 225 if not attached_devices: |
229 logging.critical('A device must be attached and online.') | 226 logging.critical('A device must be attached and online.') |
230 return 1 | 227 return 1 |
231 | 228 |
232 test_results = RunTests(attached_devices[0], options.test_suite, | 229 test_results = RunTests(attached_devices[0], options.test_suite, |
233 options.gtest_filter, options.test_arguments, | 230 options.gtest_filter, options.test_arguments, |
234 options.rebaseline, options.timeout, | 231 options.rebaseline, options.timeout, |
235 options.performance_test, | 232 options.performance_test, |
236 options.cleanup_test_files, options.tool, | 233 options.cleanup_test_files, options.tool, |
237 options.log_dump, | 234 options.log_dump, |
238 fast_and_loose=options.fast_and_loose) | 235 fast_and_loose=options.fast_and_loose) |
| 236 |
239 if buildbot_emulator: | 237 if buildbot_emulator: |
240 buildbot_emulator.Shutdown() | 238 buildbot_emulator.Shutdown() |
241 if options.use_xvfb: | |
242 xvfb.Stop() | |
243 | 239 |
244 # Another chance if we timed out? At this point it is safe(r) to | 240 # Another chance if we timed out? At this point it is safe(r) to |
245 # run fast and loose since we just uploaded all the test data and | 241 # run fast and loose since we just uploaded all the test data and |
246 # binary. | 242 # binary. |
247 if test_results.timed_out and options.repeat: | 243 if test_results.timed_out and options.repeat: |
248 logging.critical('Timed out; repeating in fast_and_loose mode.') | 244 logging.critical('Timed out; repeating in fast_and_loose mode.') |
249 options.fast_and_loose = True | 245 options.fast_and_loose = True |
250 options.repeat = options.repeat - 1 | 246 options.repeat = options.repeat - 1 |
251 logging.critical('Repeats left: ' + str(options.repeat)) | 247 logging.critical('Repeats left: ' + str(options.repeat)) |
252 return Dispatch(options) | 248 return _RunATestSuite(options) |
| 249 return len(test_results.failed) |
| 250 |
| 251 |
| 252 def Dispatch(options): |
| 253 """Dispatches the tests, sharding if possible. |
| 254 |
| 255 If options.use_emulator is True, all tests will be run in a new emulator |
| 256 instance. |
| 257 |
| 258 Args: |
| 259 options: options for running the tests. |
| 260 |
| 261 Returns: |
| 262 0 if successful, number of failing tests otherwise. |
| 263 """ |
| 264 if options.test_suite == 'help': |
| 265 ListTestSuites() |
| 266 return 0 |
| 267 buildbot_emulator = None |
| 268 |
| 269 if options.use_xvfb: |
| 270 xvfb = Xvfb() |
| 271 xvfb.Start() |
| 272 |
| 273 all_test_suites = [options.test_suite] if options.test_suite else FullyQualifiedTestSuites() |
| 274 failures = 0 |
| 275 if options.use_emulator and options.restart_emulator_each_test: |
| 276 for suite in all_test_suites: |
| 277 options.test_suite = suite |
| 278 failures += _RunATestSuite(options) |
253 else: | 279 else: |
254 return len(test_results.failed) | 280 failures += _RunATestSuite(options) |
| 281 |
| 282 if options.use_xvfb: |
| 283 xvfb.Stop() |
| 284 return failures |
255 | 285 |
256 | 286 |
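The division of labor is now: Dispatch() decides which suites to run and whether to restart the emulator between them, while _RunATestSuite() owns the emulator lifecycle for a single bundle. A minimal, self-contained sketch of the restart-per-suite loop, with a trivial stub standing in for the real runner (all names below are placeholders, not the script's API):

    # Stub: pretend to launch an emulator, run one suite, and shut down.
    def _run_one_suite(suite, restart_emulator):
      if restart_emulator:
        print 'launching a fresh emulator for', suite
      print 'running', suite
      if restart_emulator:
        print 'shutting the emulator down after', suite
      return 0  # failure count for this suite

    suites = ['base_unittests', 'sql_unittests']
    failures = sum(_run_one_suite(s, True) for s in suites)
    print 'total failures:', failures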
257 def ListTestSuites(): | 287 def ListTestSuites(): |
258 """Display a list of available test suites | 288 """Display a list of available test suites |
259 """ | 289 """ |
260 print 'Available test suites are:' | 290 print 'Available test suites are:' |
261 for test_suite in _TEST_SUITES: | 291 for test_suite in _TEST_SUITES: |
262 print test_suite | 292 print test_suite |
263 | 293 |
264 | 294 |
265 def main(argv): | 295 def main(argv): |
266 option_parser = run_tests_helper.CreateTestRunnerOptionParser(None, | 296 option_parser = run_tests_helper.CreateTestRunnerOptionParser(None, |
267 default_timeout=0) | 297 default_timeout=0) |
268 option_parser.add_option('-s', dest='test_suite', | 298 option_parser.add_option('-s', '--suite', dest='test_suite', |
269 help='Executable name of the test suite to run ' | 299 help='Executable name of the test suite to run ' |
270 '(use -s help to list them)') | 300 '(use -s help to list them)') |
271 option_parser.add_option('-r', dest='rebaseline', | 301 option_parser.add_option('-r', dest='rebaseline', |
272 help='Rebaseline and update *testsuite_disabled', | 302 help='Rebaseline and update *testsuite_disabled', |
273 action='store_true', | 303 action='store_true', |
274 default=False) | 304 default=False) |
275 option_parser.add_option('-f', '--gtest_filter', dest='gtest_filter', | 305 option_parser.add_option('-f', '--gtest_filter', dest='gtest_filter', |
276 help='gtest filter') | 306 help='gtest filter') |
277 option_parser.add_option('-a', '--test_arguments', dest='test_arguments', | 307 option_parser.add_option('-a', '--test_arguments', dest='test_arguments', |
278 help='Additional arguments to pass to the test') | 308 help='Additional arguments to pass to the test') |
(...skipping 16 matching lines...) |
295 action='store_true', default=False, | 325 action='store_true', default=False, |
296 help='Go faster (but be less stable), ' | 326 help='Go faster (but be less stable), ' |
297 'for quick testing. Example: when tracking down ' | 327 'for quick testing. Example: when tracking down ' |
298 'tests that hang to add to the disabled list, ' | 328 'tests that hang to add to the disabled list, ' |
299 'there is no need to redeploy the test binary ' | 329 'there is no need to redeploy the test binary ' |
300 'or data to the device again. ' | 330 'or data to the device again. ' |
301 'Don\'t use on bots by default!') | 331 'Don\'t use on bots by default!') |
302 option_parser.add_option('--repeat', dest='repeat', type='int', | 332 option_parser.add_option('--repeat', dest='repeat', type='int', |
303 default=2, | 333 default=2, |
304 help='Repeat count on test timeout') | 334 help='Repeat count on test timeout') |
| 335 option_parser.add_option('--restart_emulator_each_test', |
| 336 default=True, |
| 337 help='Restart the emulator for each test?') |
305 options, args = option_parser.parse_args(argv) | 338 options, args = option_parser.parse_args(argv) |
306 if len(args) > 1: | 339 if len(args) > 1: |
307 print 'Unknown argument:', args[1:] | 340 print 'Unknown argument:', args[1:] |
308 option_parser.print_usage() | 341 option_parser.print_usage() |
309 sys.exit(1) | 342 sys.exit(1) |
310 run_tests_helper.SetLogLevel(options.verbose_count) | 343 run_tests_helper.SetLogLevel(options.verbose_count) |
311 return Dispatch(options) | 344 return Dispatch(options) |
312 | 345 |
313 | 346 |
314 if __name__ == '__main__': | 347 if __name__ == '__main__': |
315 sys.exit(main(sys.argv)) | 348 sys.exit(main(sys.argv)) |
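For reference, typical invocations after this change might look like the following, assuming the script is run as run_tests.py; the suite path and gtest filter are illustrative:

    python run_tests.py -s help
    python run_tests.py -s out/Release/base_unittests -f 'StringPrintfTest.*'

The first form lists the known suites via ListTestSuites(); the second runs a single suite with a gtest filter, as handled by the -s/--suite and -f/--gtest_filter options above.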