Chromium Code Reviews
Unified Diff: build/android/run_tests.py

Issue 9104002: Add annotator tags for better debugging. More suppressions and other tweaks. (Closed) Base URL: svn://svn.chromium.org/chrome/trunk/src
Patch Set: Created 8 years, 11 months ago
 #!/usr/bin/env python
 # Copyright (c) 2011 The Chromium Authors. All rights reserved.
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.

 """Runs all the native unit tests.

 1. Copy over test binary to /data/local on device.
 2. Resources: chrome/unit_tests requires resources (chrome.pak and en-US.pak)
    to be deployed to the device (in /data/local/tmp).
(...skipping 122 matching lines...)
     try:
       os.kill(self._pid, signal.SIGKILL)
     except:
       pass
     del os.environ['DISPLAY']
     self._pid = 0


 def RunTests(device, test_suite, gtest_filter, test_arguments, rebaseline,
              timeout, performance_test, cleanup_test_files, tool,
-             log_dump_name, fast_and_loose=False):
+             log_dump_name, fast_and_loose=False, annotate=False):
   """Runs the tests.

   Args:
     device: Device to run the tests.
     test_suite: A specific test suite to run, empty to run all.
     gtest_filter: A gtest_filter flag.
     test_arguments: Additional arguments to pass to the test binary.
     rebaseline: Whether or not to run tests in isolation and update the filter.
     timeout: Timeout for each test.
     performance_test: Whether or not performance test(s).
     cleanup_test_files: Whether or not to cleanup test files on device.
     tool: Name of the Valgrind tool.
     log_dump_name: Name of log dump file.
     fast_and_loose: should we go extra-fast but sacrifice stability
       and/or correctness? Intended for quick cycle testing; not for bots!
+    annotate: should we print buildbot-style annotations?

   Returns:
     A TestResults object.
   """
   results = []

   if test_suite:
     global _TEST_SUITES
     if not os.path.exists(test_suite):
       logging.critical('Unrecognized test suite %s, supported: %s' %
                        (test_suite, _TEST_SUITES))
       if test_suite in _TEST_SUITES:
         logging.critical('(Remember to include the path: out/Release/%s)',
                          test_suite)
       return TestResults.FromOkAndFailed([], [BaseTestResult(test_suite, '')])
     fully_qualified_test_suites = [test_suite]
   else:
     fully_qualified_test_suites = FullyQualifiedTestSuites()
   debug_info_list = []
   print 'Known suites: ' + str(_TEST_SUITES)
   print 'Running these: ' + str(fully_qualified_test_suites)
   for t in fully_qualified_test_suites:
+    if annotate:
+      print '@@@BUILD_STEP Test suite %s@@@' % os.path.basename(t)
     test = SingleTestRunner(device, t, gtest_filter, test_arguments,
                             timeout, rebaseline, performance_test,
                             cleanup_test_files, tool, not not log_dump_name,
                             fast_and_loose=fast_and_loose)
     test.RunTests()
+
     results += [test.test_results]
     # Collect debug info.
     debug_info_list += [test.dump_debug_info]
     if rebaseline:
       test.UpdateFilter(test.test_results.failed)
     elif test.test_results.failed:
-      # Stop running test if encountering failed test.
       test.test_results.LogFull()
-      break
   # Zip all debug info outputs into a file named by log_dump_name.
   debug_info.GTestDebugInfo.ZipAndCleanResults(
       os.path.join(run_tests_helper.CHROME_DIR, 'out', 'Release',
                    'debug_info_dumps'),
       log_dump_name, [d for d in debug_info_list if d])
+
+  if annotate:
+    if test.test_results.timed_out:
+      print '@@@STEP_WARNINGS@@@'
+    elif test.test_results.failed:
+      print '@@@STEP_FAILURE@@@'
+    else:
+      print 'Step success!'  # No annotation needed
+
   return TestResults.FromTestResults(results)

+
 def _RunATestSuite(options):
   """Run a single test suite.

   Helper for Dispatch() to allow stop/restart of the emulator across
   test bundles. If using the emulator, we start it on entry and stop
   it on exit.

   Args:
     options: options for running the tests.
+    first_run: is this the first time we tried this test?
  [nyquist 2012/01/13 02:16:59] Where is this argument?
  [John Grabowski 2012/01/13 02:20:11] Dangling comment; removed

   Returns:
     0 if successful, number of failing tests otherwise.
   """
   attached_devices = []
+  buildbot_emulator = None
+
   if options.use_emulator:
     t = TimeProfile('Emulator launch')
     buildbot_emulator = emulator.Emulator(options.fast_and_loose)
     buildbot_emulator.Launch()
     t.Stop()
     attached_devices.append(buildbot_emulator.device)
   else:
     attached_devices = android_commands.GetAttachedDevices()

   if not attached_devices:
     logging.critical('A device must be attached and online.')
     return 1

   test_results = RunTests(attached_devices[0], options.test_suite,
                           options.gtest_filter, options.test_arguments,
                           options.rebaseline, options.timeout,
                           options.performance_test,
                           options.cleanup_test_files, options.tool,
                           options.log_dump,
-                          fast_and_loose=options.fast_and_loose)
+                          fast_and_loose=options.fast_and_loose,
+                          annotate=options.annotate)

   if buildbot_emulator:
     buildbot_emulator.Shutdown()

   # Another chance if we timed out? At this point It is safe(r) to
   # run fast and loose since we just uploaded all the test data and
   # binary.
   if test_results.timed_out and options.repeat:
     logging.critical('Timed out; repeating in fast_and_loose mode.')
     options.fast_and_loose = True
(...skipping 11 matching lines...)

   Args:
     options: options for running the tests.

   Returns:
     0 if successful, number of failing tests otherwise.
   """
   if options.test_suite == 'help':
     ListTestSuites()
     return 0
-  buildbot_emulator = None

   if options.use_xvfb:
     xvfb = Xvfb()
     xvfb.Start()

-  all_test_suites = [options.test_suite] or FullyQualifiedTestSuites()
+  if options.test_suite:
+    all_test_suites = [options.test_suite]
+  else:
+    all_test_suites = FullyQualifiedTestSuites()
   failures = 0
-  if options.use_emulator and options.restart_emulator_each_test:
-    for suite in all_test_suites:
-      options.test_suite = suite
-      failures += _RunATestSuite(options)
-  else:
+  for suite in all_test_suites:
+    options.test_suite = suite
     failures += _RunATestSuite(options)

   if options.use_xvfb:
     xvfb.Stop()
   return failures


 def ListTestSuites():
   """Display a list of available test suites
   """
(...skipping 35 matching lines...)
                            action='store_true', default=False,
                            help='Go faster (but be less stable), '
                                 'for quick testing. Example: when tracking down '
                                 'tests that hang to add to the disabled list, '
                                 'there is no need to redeploy the test binary '
                                 'or data to the device again. '
                                 'Don\'t use on bots by default!')
   option_parser.add_option('--repeat', dest='repeat', type='int',
                            default=2,
                            help='Repeat count on test timeout')
   option_parser.add_option('--restart_emulator_each_test',
  [nyquist 2012/01/13 02:16:59] Is this option still needed?
  [John Grabowski 2012/01/13 02:20:11] Sadly, yes.
                            default='True',
                            help='Restart the emulator for each test?')
+  option_parser.add_option('--annotate', default=True,
+                           help='Print buildbot-style annotate messages '
+                                'for each test suite. Default=True')
   options, args = option_parser.parse_args(argv)
   if len(args) > 1:
     print 'Unknown argument:', args[1:]
     option_parser.print_usage()
     sys.exit(1)
   run_tests_helper.SetLogLevel(options.verbose_count)
   return Dispatch(options)


 if __name__ == '__main__':
   sys.exit(main(sys.argv))
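
Background for reviewers unfamiliar with the annotator protocol: the buildbot annotator watches the script's stdout for @@@...@@@ tokens, which is what the new --annotate flag emits. Below is a minimal, hypothetical sketch of the marker pattern this change introduces; run_one_suite and the suite paths are placeholders for illustration and are not code from this CL.

# Illustrative sketch only: shows the annotation markers used by this change.
# run_one_suite() and the suite paths are hypothetical placeholders.
import os

def run_one_suite(suite_path):
  # Stand-in for SingleTestRunner; returns (failed, timed_out).
  return (False, False)

def run_annotated(suites):
  for suite in suites:
    # One buildbot step per test suite.
    print '@@@BUILD_STEP Test suite %s@@@' % os.path.basename(suite)
    failed, timed_out = run_one_suite(suite)
    if timed_out:
      print '@@@STEP_WARNINGS@@@'   # flags the step as a warning
    elif failed:
      print '@@@STEP_FAILURE@@@'    # flags the step as failed

if __name__ == '__main__':
  run_annotated(['out/Release/base_unittests', 'out/Release/net_unittests'])

Note that in the patch itself the STEP_WARNINGS/STEP_FAILURE markers are printed once after the suite loop, based on the last suite's results; the sketch above only illustrates the token format.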