OLD | NEW |
1 #!/usr/bin/env python | 1 #!/usr/bin/env python |
2 # Copyright (c) 2011 The Chromium Authors. All rights reserved. | 2 # Copyright (c) 2011 The Chromium Authors. All rights reserved. |
3 # Use of this source code is governed by a BSD-style license that can be | 3 # Use of this source code is governed by a BSD-style license that can be |
4 # found in the LICENSE file. | 4 # found in the LICENSE file. |
5 | 5 |
6 """Runs all the native unit tests. | 6 """Runs all the native unit tests. |
7 | 7 |
8 1. Copy over test binary to /data/local on device. | 8 1. Copy over test binary to /data/local on device. |
9 2. Resources: chrome/unit_tests requires resources (chrome.pak and en-US.pak) | 9 2. Resources: chrome/unit_tests requires resources (chrome.pak and en-US.pak) |
10 to be deployed to the device (in /data/local/tmp). | 10 to be deployed to the device (in /data/local/tmp). |
(...skipping 122 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
133 try: | 133 try: |
134 os.kill(self._pid, signal.SIGKILL) | 134 os.kill(self._pid, signal.SIGKILL) |
135 except: | 135 except: |
136 pass | 136 pass |
137 del os.environ['DISPLAY'] | 137 del os.environ['DISPLAY'] |
138 self._pid = 0 | 138 self._pid = 0 |
139 | 139 |
140 | 140 |
141 def RunTests(device, test_suite, gtest_filter, test_arguments, rebaseline, | 141 def RunTests(device, test_suite, gtest_filter, test_arguments, rebaseline, |
142 timeout, performance_test, cleanup_test_files, tool, | 142 timeout, performance_test, cleanup_test_files, tool, |
143 log_dump_name, fast_and_loose=False): | 143 log_dump_name, fast_and_loose=False, annotate=False): |
144 """Runs the tests. | 144 """Runs the tests. |
145 | 145 |
146 Args: | 146 Args: |
147 device: Device to run the tests. | 147 device: Device to run the tests. |
148 test_suite: A specific test suite to run, empty to run all. | 148 test_suite: A specific test suite to run, empty to run all. |
149 gtest_filter: A gtest_filter flag. | 149 gtest_filter: A gtest_filter flag. |
150 test_arguments: Additional arguments to pass to the test binary. | 150 test_arguments: Additional arguments to pass to the test binary. |
151 rebaseline: Whether or not to run tests in isolation and update the filter. | 151 rebaseline: Whether or not to run tests in isolation and update the filter. |
152 timeout: Timeout for each test. | 152 timeout: Timeout for each test. |
153 performance_test: Whether or not performance test(s). | 153 performance_test: Whether or not performance test(s). |
154 cleanup_test_files: Whether or not to cleanup test files on device. | 154 cleanup_test_files: Whether or not to cleanup test files on device. |
155 tool: Name of the Valgrind tool. | 155 tool: Name of the Valgrind tool. |
156 log_dump_name: Name of log dump file. | 156 log_dump_name: Name of log dump file. |
157 fast_and_loose: should we go extra-fast but sacrifice stability | 157 fast_and_loose: should we go extra-fast but sacrifice stability |
158 and/or correctness? Intended for quick cycle testing; not for bots! | 158 and/or correctness? Intended for quick cycle testing; not for bots! |
| 159 annotate: should we print buildbot-style annotations? |
159 | 160 |
160 Returns: | 161 Returns: |
161 A TestResults object. | 162 A TestResults object. |
162 """ | 163 """ |
163 results = [] | 164 results = [] |
164 | 165 |
165 if test_suite: | 166 if test_suite: |
166 global _TEST_SUITES | 167 global _TEST_SUITES |
167 if not os.path.exists(test_suite): | 168 if not os.path.exists(test_suite): |
168 logging.critical('Unrecognized test suite %s, supported: %s' % | 169 logging.critical('Unrecognized test suite %s, supported: %s' % |
169 (test_suite, _TEST_SUITES)) | 170 (test_suite, _TEST_SUITES)) |
170 if test_suite in _TEST_SUITES: | 171 if test_suite in _TEST_SUITES: |
171 logging.critical('(Remember to include the path: out/Release/%s)', | 172 logging.critical('(Remember to include the path: out/Release/%s)', |
172 test_suite) | 173 test_suite) |
173 return TestResults.FromOkAndFailed([], [BaseTestResult(test_suite, '')]) | 174 return TestResults.FromOkAndFailed([], [BaseTestResult(test_suite, '')]) |
174 fully_qualified_test_suites = [test_suite] | 175 fully_qualified_test_suites = [test_suite] |
175 else: | 176 else: |
176 fully_qualified_test_suites = FullyQualifiedTestSuites() | 177 fully_qualified_test_suites = FullyQualifiedTestSuites() |
177 debug_info_list = [] | 178 debug_info_list = [] |
178 print 'Known suites: ' + str(_TEST_SUITES) | 179 print 'Known suites: ' + str(_TEST_SUITES) |
179 print 'Running these: ' + str(fully_qualified_test_suites) | 180 print 'Running these: ' + str(fully_qualified_test_suites) |
180 for t in fully_qualified_test_suites: | 181 for t in fully_qualified_test_suites: |
| 182 if annotate: |
| 183 print '@@@BUILD_STEP Test suite %s@@@' % os.path.basename(t) |
181 test = SingleTestRunner(device, t, gtest_filter, test_arguments, | 184 test = SingleTestRunner(device, t, gtest_filter, test_arguments, |
182 timeout, rebaseline, performance_test, | 185 timeout, rebaseline, performance_test, |
183 cleanup_test_files, tool, not not log_dump_name, | 186 cleanup_test_files, tool, not not log_dump_name, |
184 fast_and_loose=fast_and_loose) | 187 fast_and_loose=fast_and_loose) |
185 test.RunTests() | 188 test.RunTests() |
| 189 |
186 results += [test.test_results] | 190 results += [test.test_results] |
187 # Collect debug info. | 191 # Collect debug info. |
188 debug_info_list += [test.dump_debug_info] | 192 debug_info_list += [test.dump_debug_info] |
189 if rebaseline: | 193 if rebaseline: |
190 test.UpdateFilter(test.test_results.failed) | 194 test.UpdateFilter(test.test_results.failed) |
191 elif test.test_results.failed: | 195 elif test.test_results.failed: |
192 # Stop running test if encountering failed test. | |
193 test.test_results.LogFull() | 196 test.test_results.LogFull() |
194 break | |
195 # Zip all debug info outputs into a file named by log_dump_name. | 197 # Zip all debug info outputs into a file named by log_dump_name. |
196 debug_info.GTestDebugInfo.ZipAndCleanResults( | 198 debug_info.GTestDebugInfo.ZipAndCleanResults( |
197 os.path.join(run_tests_helper.CHROME_DIR, 'out', 'Release', | 199 os.path.join(run_tests_helper.CHROME_DIR, 'out', 'Release', |
198 'debug_info_dumps'), | 200 'debug_info_dumps'), |
199 log_dump_name, [d for d in debug_info_list if d]) | 201 log_dump_name, [d for d in debug_info_list if d]) |
| 202 |
| 203 if annotate: |
| 204 if test.test_results.timed_out: |
| 205 print '@@@STEP_WARNINGS@@@' |
| 206 elif test.test_results.failed: |
| 207 print '@@@STEP_FAILURE@@@' |
| 208 else: |
| 209 print 'Step success!' # No annotation needed |
| 210 |
200 return TestResults.FromTestResults(results) | 211 return TestResults.FromTestResults(results) |
201 | 212 |
| 213 |
202 def _RunATestSuite(options): | 214 def _RunATestSuite(options): |
203 """Run a single test suite. | 215 """Run a single test suite. |
204 | 216 |
205 Helper for Dispatch() to allow stop/restart of the emulator across | 217 Helper for Dispatch() to allow stop/restart of the emulator across |
206 test bundles. If using the emulator, we start it on entry and stop | 218 test bundles. If using the emulator, we start it on entry and stop |
207 it on exit. | 219 it on exit. |
208 | 220 |
209 Args: | 221 Args: |
210 options: options for running the tests. | 222 options: options for running the tests. |
211 | 223 |
212 Returns: | 224 Returns: |
213 0 if successful, number of failing tests otherwise. | 225 0 if successful, number of failing tests otherwise. |
214 """ | 226 """ |
215 attached_devices = [] | 227 attached_devices = [] |
| 228 buildbot_emulator = None |
| 229 |
216 if options.use_emulator: | 230 if options.use_emulator: |
217 t = TimeProfile('Emulator launch') | 231 t = TimeProfile('Emulator launch') |
218 buildbot_emulator = emulator.Emulator(options.fast_and_loose) | 232 buildbot_emulator = emulator.Emulator(options.fast_and_loose) |
219 buildbot_emulator.Launch() | 233 buildbot_emulator.Launch() |
220 t.Stop() | 234 t.Stop() |
221 attached_devices.append(buildbot_emulator.device) | 235 attached_devices.append(buildbot_emulator.device) |
222 else: | 236 else: |
223 attached_devices = android_commands.GetAttachedDevices() | 237 attached_devices = android_commands.GetAttachedDevices() |
224 | 238 |
225 if not attached_devices: | 239 if not attached_devices: |
226 logging.critical('A device must be attached and online.') | 240 logging.critical('A device must be attached and online.') |
227 return 1 | 241 return 1 |
228 | 242 |
229 test_results = RunTests(attached_devices[0], options.test_suite, | 243 test_results = RunTests(attached_devices[0], options.test_suite, |
230 options.gtest_filter, options.test_arguments, | 244 options.gtest_filter, options.test_arguments, |
231 options.rebaseline, options.timeout, | 245 options.rebaseline, options.timeout, |
232 options.performance_test, | 246 options.performance_test, |
233 options.cleanup_test_files, options.tool, | 247 options.cleanup_test_files, options.tool, |
234 options.log_dump, | 248 options.log_dump, |
235 fast_and_loose=options.fast_and_loose) | 249 fast_and_loose=options.fast_and_loose, |
| 250 annotate=options.annotate) |
236 | 251 |
237 if buildbot_emulator: | 252 if buildbot_emulator: |
238 buildbot_emulator.Shutdown() | 253 buildbot_emulator.Shutdown() |
239 | 254 |
240 # Another chance if we timed out? At this point It is safe(r) to | 255 # Another chance if we timed out? At this point It is safe(r) to |
241 # run fast and loose since we just uploaded all the test data and | 256 # run fast and loose since we just uploaded all the test data and |
242 # binary. | 257 # binary. |
243 if test_results.timed_out and options.repeat: | 258 if test_results.timed_out and options.repeat: |
244 logging.critical('Timed out; repeating in fast_and_loose mode.') | 259 logging.critical('Timed out; repeating in fast_and_loose mode.') |
245 options.fast_and_loose = True | 260 options.fast_and_loose = True |
(...skipping 11 matching lines...) Expand all Loading... |
257 | 272 |
258 Args: | 273 Args: |
259 options: options for running the tests. | 274 options: options for running the tests. |
260 | 275 |
261 Returns: | 276 Returns: |
262 0 if successful, number of failing tests otherwise. | 277 0 if successful, number of failing tests otherwise. |
263 """ | 278 """ |
264 if options.test_suite == 'help': | 279 if options.test_suite == 'help': |
265 ListTestSuites() | 280 ListTestSuites() |
266 return 0 | 281 return 0 |
267 buildbot_emulator = None | |
268 | 282 |
269 if options.use_xvfb: | 283 if options.use_xvfb: |
270 xvfb = Xvfb() | 284 xvfb = Xvfb() |
271 xvfb.Start() | 285 xvfb.Start() |
272 | 286 |
273 all_test_suites = [options.test_suite] or FullyQualifiedTestSuites() | 287 if options.test_suite: |
| 288 all_test_suites = [options.test_suite] |
| 289 else: |
| 290 all_test_suites = FullyQualifiedTestSuites() |
274 failures = 0 | 291 failures = 0 |
275 if options.use_emulator and options.restart_emulator_each_test: | 292 for suite in all_test_suites: |
276 for suite in all_test_suites: | 293 options.test_suite = suite |
277 options.test_suite = suite | |
278 failures += _RunATestSuite(options) | |
279 else: | |
280 failures += _RunATestSuite(options) | 294 failures += _RunATestSuite(options) |
281 | 295 |
282 if options.use_xvfb: | 296 if options.use_xvfb: |
283 xvfb.Stop() | 297 xvfb.Stop() |
284 return failures | 298 return failures |
285 | 299 |
286 | 300 |
287 def ListTestSuites(): | 301 def ListTestSuites(): |
288 """Display a list of available test suites | 302 """Display a list of available test suites |
289 """ | 303 """ |
(...skipping 35 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
325 action='store_true', default=False, | 339 action='store_true', default=False, |
326 help='Go faster (but be less stable), ' | 340 help='Go faster (but be less stable), ' |
327 'for quick testing. Example: when tracking down ' | 341 'for quick testing. Example: when tracking down ' |
328 'tests that hang to add to the disabled list, ' | 342 'tests that hang to add to the disabled list, ' |
329 'there is no need to redeploy the test binary ' | 343 'there is no need to redeploy the test binary ' |
330 'or data to the device again. ' | 344 'or data to the device again. ' |
331 'Don\'t use on bots by default!') | 345 'Don\'t use on bots by default!') |
332 option_parser.add_option('--repeat', dest='repeat', type='int', | 346 option_parser.add_option('--repeat', dest='repeat', type='int', |
333 default=2, | 347 default=2, |
334 help='Repeat count on test timeout') | 348 help='Repeat count on test timeout') |
335 option_parser.add_option('--restart_emulator_each_test', | 349 option_parser.add_option('--annotate', default=True, |
336 default='True', | 350 help='Print buildbot-style annotate messages ' |
337 help='Restart the emulator for each test?') | 351 'for each test suite. Default=True') |
338 options, args = option_parser.parse_args(argv) | 352 options, args = option_parser.parse_args(argv) |
339 if len(args) > 1: | 353 if len(args) > 1: |
340 print 'Unknown argument:', args[1:] | 354 print 'Unknown argument:', args[1:] |
341 option_parser.print_usage() | 355 option_parser.print_usage() |
342 sys.exit(1) | 356 sys.exit(1) |
343 run_tests_helper.SetLogLevel(options.verbose_count) | 357 run_tests_helper.SetLogLevel(options.verbose_count) |
344 return Dispatch(options) | 358 return Dispatch(options) |
345 | 359 |
346 | 360 |
if __name__ == '__main__':
  # Exit status is the number of failing tests (0 on success), as returned
  # by Dispatch() via main().
  sys.exit(main(sys.argv))
OLD | NEW |