Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(386)

Side by Side Diff: build/android/run_tests.py

Issue 10777017: Android: further simplification for test runners. (Closed) Base URL: svn://svn.chromium.org/chrome/trunk/src
Patch Set: Patchset Created 8 years, 5 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch | Annotate | Revision Log
OLDNEW
1 #!/usr/bin/env python 1 #!/usr/bin/env python
2 # 2 #
3 # Copyright (c) 2012 The Chromium Authors. All rights reserved. 3 # Copyright (c) 2012 The Chromium Authors. All rights reserved.
4 # Use of this source code is governed by a BSD-style license that can be 4 # Use of this source code is governed by a BSD-style license that can be
5 # found in the LICENSE file. 5 # found in the LICENSE file.
6 6
7 """Runs all the native unit tests. 7 """Runs all the native unit tests.
8 8
9 1. Copy over test binary to /data/local on device. 9 1. Copy over test binary to /data/local on device.
10 2. Resources: chrome/unit_tests requires resources (chrome.pak and en-US.pak) 10 2. Resources: chrome/unit_tests requires resources (chrome.pak and en-US.pak)
(...skipping 62 matching lines...) Expand 10 before | Expand all | Expand 10 after
73 'content_unittests', 73 'content_unittests',
74 'gpu_unittests', 74 'gpu_unittests',
75 'ipc_tests', 75 'ipc_tests',
76 'net_unittests', 76 'net_unittests',
77 'sql_unittests', 77 'sql_unittests',
78 'sync_unit_tests', 78 'sync_unit_tests',
79 'ui_unittests', 79 'ui_unittests',
80 ] 80 ]
81 81
82 82
def FullyQualifiedTestSuites(exe, option_test_suite):
  """Builds the fully qualified paths of the test suites to run.

  Args:
    exe: if True, use the executable-based test runner.
    option_test_suite: the test_suite specified as an option; when falsy,
        every suite in _TEST_SUITES is used.

  Returns:
    A list of absolute suite paths (executables or APKs), or an empty list
    if any requested suite is missing on disk.
  """
  # Assume the test suites are in out/Release.
  release_dir = os.path.abspath(os.path.join(constants.CHROME_DIR,
                                             'out', 'Release'))
  suite_names = [option_test_suite] if option_test_suite else _TEST_SUITES

  if exe:
    qualified = [os.path.join(release_dir, name) for name in suite_names]
  else:
    # out/Release/$SUITE_apk/$SUITE-debug.apk
    qualified = [os.path.join(release_dir,
                              name + '_apk',
                              name + '-debug.apk')
                 for name in suite_names]

  # Refuse to run anything if a requested suite has not been built.
  for name, path in zip(suite_names, qualified):
    if not os.path.exists(path):
      logging.critical('Test suite %s not found in %s.\n'
                       'Supported test suites:\n %s\n'
                       'Ensure it has been built.\n',
                       name, path, _TEST_SUITES)
      return []
  return qualified
102 115
103 116
104 class TimeProfile(object): 117 class TimeProfile(object):
105 """Class for simple profiling of action, with logging of cost.""" 118 """Class for simple profiling of action, with logging of cost."""
106 119
107 def __init__(self, description): 120 def __init__(self, description):
108 self._description = description 121 self._description = description
109 self.Start() 122 self.Start()
110 123
111 def Start(self): 124 def Start(self):
(...skipping 56 matching lines...) Expand 10 before | Expand all | Expand 10 after
168 181
def PrintAnnotationForTestResults(test_results):
  """Emits the buildbot annotation that matches an overall test outcome.

  Args:
    test_results: a TestResults-like object; only its timed_out, failed,
        crashed and overall_fail attributes are read.
  """
  if test_results.timed_out:
    buildbot_report.PrintWarning()
    return
  run_failed = (test_results.failed or test_results.crashed or
                test_results.overall_fail)
  if run_failed:
    buildbot_report.PrintError()
  else:
    print('Step success!')  # No annotation needed
176 189
177 190
def RunTests(exe, device, test_suite, gtest_filter, test_arguments, rebaseline,
             timeout, performance_test, cleanup_test_files, tool,
             log_dump_name, fast_and_loose):
  """Runs the tests.

  Args:
    exe: boolean to state if we are using the exe based test runner
    device: Device to run the tests.
    test_suite: A specific test suite to run, empty to run all.
    gtest_filter: A gtest_filter flag.
    test_arguments: Additional arguments to pass to the test binary.
    rebaseline: Whether or not to run tests in isolation and update the filter.
    timeout: Timeout for each test.
    performance_test: Whether or not performance test(s).
    cleanup_test_files: Whether or not to cleanup test files on device.
    tool: Name of the Valgrind tool.
    log_dump_name: Name of log dump file.
    fast_and_loose: if set, skip copying data files.

  Returns:
    A TestResults object.
  """
  results = []

  if test_suite:
    if not os.path.exists(test_suite):
      logging.critical('Unrecognized test suite %s, supported: %s',
                       test_suite, _TEST_SUITES)
      if test_suite in _TEST_SUITES:
        logging.critical('(Remember to include the path: out/Release/%s)',
                         test_suite)
      test_suite_basename = os.path.basename(test_suite)
      if test_suite_basename in _TEST_SUITES:
        logging.critical('Try "make -j15 %s"', test_suite_basename)
      else:
        logging.critical('Unrecognized test suite, supported: %s',
                         _TEST_SUITES)
      return TestResults.FromRun([], [BaseTestResult(test_suite, '')],
                                 False, False)
    fully_qualified_test_suites = [test_suite]
  else:
    fully_qualified_test_suites = FullyQualifiedTestSuites(exe, _TEST_SUITES)
  debug_info_list = []
  print('Known suites: ' + str(_TEST_SUITES))
  print('Running these: ' + str(fully_qualified_test_suites))
  test = None
  for t in fully_qualified_test_suites:
    buildbot_report.PrintNamedStep('Test suite %s' % os.path.basename(t))
    test = SingleTestRunner(device, t, gtest_filter, test_arguments,
                            timeout, rebaseline, performance_test,
                            cleanup_test_files, tool, 0, not not log_dump_name,
                            fast_and_loose)
    test.Run()

    results.append(test.test_results)
    # Collect debug info.
    debug_info_list.append(test.dump_debug_info)
    if rebaseline:
      test.UpdateFilter(test.test_results.failed)
    test.test_results.LogFull('Unit test', os.path.basename(t))
  # Fix: only zip debug info when a dump file was actually requested, matching
  # the guarded call in TestSharder.OnTestsCompleted. Previously
  # ZipAndCleanResults ran unconditionally, even with log_dump_name unset.
  if log_dump_name:
    # Zip all debug info outputs into a file named by log_dump_name.
    debug_info.GTestDebugInfo.ZipAndCleanResults(
        os.path.join(constants.CHROME_DIR, 'out', 'Release',
                     'debug_info_dumps'),
        log_dump_name, [d for d in debug_info_list if d])

  # Fix: 'test' was referenced unconditionally after the loop, raising
  # NameError when the suite list was empty (e.g. nothing was built).
  if test is not None:
    PrintAnnotationForTestResults(test.test_results)

  return TestResults.FromTestResults(results)
245
246
class TestSharder(BaseTestSharder):
  """Responsible for sharding the tests on the connected devices."""

  def __init__(self, attached_devices, test_suite, gtest_filter,
               test_arguments, timeout, rebaseline, performance_test,
               cleanup_test_files, tool, log_dump_name, fast_and_loose):
    BaseTestSharder.__init__(self, attached_devices)
    self.test_suite = test_suite
    self.test_suite_basename = os.path.basename(test_suite)
    # Normalized to '' so it can be appended to a joined shard filter.
    self.gtest_filter = gtest_filter or ''
    self.test_arguments = test_arguments
    self.timeout = timeout
    self.rebaseline = rebaseline
    self.performance_test = performance_test
    self.cleanup_test_files = cleanup_test_files
    self.tool = tool
    self.log_dump_name = log_dump_name
    self.fast_and_loose = fast_and_loose
    probe_runner = SingleTestRunner(self.attached_devices[0], test_suite,
                                    gtest_filter, test_arguments, timeout,
                                    rebaseline, performance_test,
                                    cleanup_test_files, tool, 0,
                                    not not self.log_dump_name,
                                    fast_and_loose)
    self.tests = []
    if not self.gtest_filter:
      # No filter has been specified, let's add all tests then.
      # The executable/apk needs to be copied before we can call GetAllTests.
      probe_runner.test_package.StripAndCopyExecutable()
      discovered = probe_runner.test_package.GetAllTests()
      if not rebaseline:
        disabled_list = probe_runner.GetDisabledTests()
        # Only include tests that do not match any disabled pattern.
        discovered = [name for name in discovered
                      if not any(fnmatch.fnmatch(name, pattern)
                                 for pattern in disabled_list)]
      self.tests = discovered

  def CreateShardedTestRunner(self, device, index):
    """Creates a suite-specific test runner.

    Args:
      device: Device serial where this shard will run.
      index: Index of this device in the pool.

    Returns:
      A SingleTestRunner object.
    """
    device_num = len(self.attached_devices)
    # Ceiling division so every test lands in exactly one shard.
    shard_size = (len(self.tests) + device_num - 1) // device_num
    begin = index * shard_size
    shard_test_list = self.tests[begin:begin + shard_size]
    test_filter = ':'.join(shard_test_list) + self.gtest_filter
    return SingleTestRunner(device, self.test_suite,
                            test_filter, self.test_arguments, self.timeout,
                            self.rebaseline, self.performance_test,
                            self.cleanup_test_files, self.tool, index,
                            not not self.log_dump_name, self.fast_and_loose)

  def OnTestsCompleted(self, test_runners, test_results):
    """Notifies that we completed the tests."""
    test_results.LogFull('Unit test', os.path.basename(self.test_suite))
    PrintAnnotationForTestResults(test_results)
    if test_results.failed and self.rebaseline:
      test_runners[0].UpdateFilter(test_results.failed)
    if self.log_dump_name:
      # Zip all debug info outputs into a file named by log_dump_name.
      debug_info.GTestDebugInfo.ZipAndCleanResults(
          os.path.join(constants.CHROME_DIR, 'out', 'Release',
                       'debug_info_dumps'),
          self.log_dump_name)
307 260
308 261
309 def _RunATestSuite(options): 262 def _RunATestSuite(options):
310 """Run a single test suite. 263 """Run a single test suite.
311 264
312 Helper for Dispatch() to allow stop/restart of the emulator across 265 Helper for Dispatch() to allow stop/restart of the emulator across
313 test bundles. If using the emulator, we start it on entry and stop 266 test bundles. If using the emulator, we start it on entry and stop
314 it on exit. 267 it on exit.
315 268
316 Args: 269 Args:
317 options: options for running the tests. 270 options: options for running the tests.
318 271
319 Returns: 272 Returns:
320 0 if successful, number of failing tests otherwise. 273 0 if successful, number of failing tests otherwise.
321 """ 274 """
275 buildbot_report.PrintNamedStep('Test suite %s' % options.test_suite)
322 attached_devices = [] 276 attached_devices = []
323 buildbot_emulators = [] 277 buildbot_emulators = []
324 278
325 if options.use_emulator: 279 if options.use_emulator:
326 for n in range(options.use_emulator): 280 for n in range(options.use_emulator):
327 t = TimeProfile('Emulator launch %d' % n) 281 t = TimeProfile('Emulator launch %d' % n)
328 buildbot_emulator = emulator.Emulator(options.fast_and_loose) 282 buildbot_emulator = emulator.Emulator(options.fast_and_loose)
329 buildbot_emulator.Launch(kill_all_emulators=n == 0) 283 buildbot_emulator.Launch(kill_all_emulators=n == 0)
330 t.Stop() 284 t.Stop()
331 buildbot_emulators.append(buildbot_emulator) 285 buildbot_emulators.append(buildbot_emulator)
332 attached_devices.append(buildbot_emulator.device) 286 attached_devices.append(buildbot_emulator.device)
333 # Wait for all emulators to boot completed. 287 # Wait for all emulators to boot completed.
334 map(lambda buildbot_emulator: buildbot_emulator.ConfirmLaunch(True), 288 map(lambda buildbot_emulator: buildbot_emulator.ConfirmLaunch(True),
335 buildbot_emulators) 289 buildbot_emulators)
336 elif options.test_device: 290 elif options.test_device:
337 attached_devices = [options.test_device] 291 attached_devices = [options.test_device]
338 else: 292 else:
339 attached_devices = android_commands.GetAttachedDevices() 293 attached_devices = android_commands.GetAttachedDevices()
340 294
341 if not attached_devices: 295 if not attached_devices:
342 logging.critical('A device must be attached and online.') 296 logging.critical('A device must be attached and online.')
343 buildbot_report.PrintError() 297 buildbot_report.PrintError()
344 return 1 298 return 1
345 299
346 # Reset the test port allocation. It's important to do it before starting 300 # Reset the test port allocation. It's important to do it before starting
347 # to dispatch any tests. 301 # to dispatch any tests.
348 if not ports.ResetTestServerPortAllocation(): 302 if not ports.ResetTestServerPortAllocation():
349 raise Exception('Failed to reset test server port.') 303 raise Exception('Failed to reset test server port.')
350 304
351 if (len(attached_devices) > 1 and options.test_suite and 305 if options.performance_test or options.gtest_filter:
352 not options.gtest_filter and not options.performance_test): 306 # These configuration can't be split in multiple devices.
353 sharder = TestSharder(attached_devices, options.test_suite, 307 attached_devices = [attached_devices[0]]
354 options.gtest_filter, options.test_arguments, 308 sharder = TestSharder(attached_devices, options.test_suite,
355 options.timeout, options.rebaseline, 309 options.gtest_filter, options.test_arguments,
356 options.performance_test, 310 options.timeout, options.rebaseline,
357 options.cleanup_test_files, options.tool, 311 options.performance_test,
358 options.log_dump, options.fast_and_loose) 312 options.cleanup_test_files, options.tool,
359 test_results = sharder.RunShardedTests() 313 options.log_dump, options.fast_and_loose)
360 else: 314 test_results = sharder.RunShardedTests()
361 test_results = RunTests(options.exe, attached_devices[0],
362 options.test_suite,
363 options.gtest_filter, options.test_arguments,
364 options.rebaseline, options.timeout,
365 options.performance_test,
366 options.cleanup_test_files, options.tool,
367 options.log_dump, options.fast_and_loose)
368 315
369 for buildbot_emulator in buildbot_emulators: 316 for buildbot_emulator in buildbot_emulators:
370 buildbot_emulator.Shutdown() 317 buildbot_emulator.Shutdown()
371 318
372 # Another chance if we timed out? At this point It is safe(r) to 319 # Another chance if we timed out? At this point It is safe(r) to
373 # run fast and loose since we just uploaded all the test data and 320 # run fast and loose since we just uploaded all the test data and
374 # binary. 321 # binary.
375 if test_results.timed_out and options.repeat: 322 if test_results.timed_out and options.repeat:
376 logging.critical('Timed out; repeating in fast_and_loose mode.') 323 logging.critical('Timed out; repeating in fast_and_loose mode.')
377 options.fast_and_loose = True 324 options.fast_and_loose = True
(...skipping 16 matching lines...) Expand all
394 0 if successful, number of failing tests otherwise. 341 0 if successful, number of failing tests otherwise.
395 """ 342 """
396 if options.test_suite == 'help': 343 if options.test_suite == 'help':
397 ListTestSuites() 344 ListTestSuites()
398 return 0 345 return 0
399 346
400 if options.use_xvfb: 347 if options.use_xvfb:
401 xvfb = Xvfb() 348 xvfb = Xvfb()
402 xvfb.Start() 349 xvfb.Start()
403 350
404 if options.test_suite: 351 all_test_suites = FullyQualifiedTestSuites(options.exe, options.test_suite)
405 all_test_suites = FullyQualifiedTestSuites(options.exe,
406 [options.test_suite])
407 else:
408 all_test_suites = FullyQualifiedTestSuites(options.exe,
409 _TEST_SUITES)
410 failures = 0 352 failures = 0
411 for suite in all_test_suites: 353 for suite in all_test_suites:
412 options.test_suite = suite 354 options.test_suite = suite
413 failures += _RunATestSuite(options) 355 failures += _RunATestSuite(options)
414 356
415 if options.use_xvfb: 357 if options.use_xvfb:
416 xvfb.Stop() 358 xvfb.Stop()
417 return failures 359 return failures
418 360
419 361
(...skipping 16 matching lines...) Expand all
436 help='Rebaseline and update *testsuite_disabled', 378 help='Rebaseline and update *testsuite_disabled',
437 action='store_true') 379 action='store_true')
438 option_parser.add_option('-f', '--gtest_filter', dest='gtest_filter', 380 option_parser.add_option('-f', '--gtest_filter', dest='gtest_filter',
439 help='gtest filter') 381 help='gtest filter')
440 option_parser.add_option('-a', '--test_arguments', dest='test_arguments', 382 option_parser.add_option('-a', '--test_arguments', dest='test_arguments',
441 help='Additional arguments to pass to the test') 383 help='Additional arguments to pass to the test')
442 option_parser.add_option('-p', dest='performance_test', 384 option_parser.add_option('-p', dest='performance_test',
443 help='Indicator of performance test', 385 help='Indicator of performance test',
444 action='store_true') 386 action='store_true')
445 option_parser.add_option('-L', dest='log_dump', 387 option_parser.add_option('-L', dest='log_dump',
446 help='file name of log dump, which will be put in' 388 help='file name of log dump, which will be put in '
447 'subfolder debug_info_dumps under the same directory' 389 'subfolder debug_info_dumps under the same '
448 'in where the test_suite exists.') 390 'directory in where the test_suite exists.')
449 option_parser.add_option('-e', '--emulator', dest='use_emulator', 391 option_parser.add_option('-e', '--emulator', dest='use_emulator',
450 help='Run tests in a new instance of emulator', 392 help='Run tests in a new instance of emulator',
451 type='int', 393 type='int',
452 default=0) 394 default=0)
453 option_parser.add_option('-x', '--xvfb', dest='use_xvfb', 395 option_parser.add_option('-x', '--xvfb', dest='use_xvfb',
454 action='store_true', 396 action='store_true',
455 help='Use Xvfb around tests (ignored if not Linux)') 397 help='Use Xvfb around tests (ignored if not Linux)')
456 option_parser.add_option('--fast', '--fast_and_loose', dest='fast_and_loose', 398 option_parser.add_option('--fast', '--fast_and_loose', dest='fast_and_loose',
457 action='store_true', 399 action='store_true',
458 help='Go faster (but be less stable), ' 400 help='Go faster (but be less stable), '
(...skipping 26 matching lines...) Expand all
485 # the batch (this happens because the exit status is a sum of all failures 427 # the batch (this happens because the exit status is a sum of all failures
486 # from all suites, but the buildbot associates the exit status only with the 428 # from all suites, but the buildbot associates the exit status only with the
487 # most recent step). 429 # most recent step).
488 if options.exit_code: 430 if options.exit_code:
489 return failed_tests_count 431 return failed_tests_count
490 return 0 432 return 0
491 433
492 434
if __name__ == '__main__':
  # Script entry point: propagate main()'s return value as the exit status.
  sys.exit(main(sys.argv))
OLDNEW
« build/android/pylib/debug_info.py ('K') | « build/android/pylib/single_test_runner.py ('k') | no next file » | no next file with comments »

Powered by Google App Engine
This is Rietveld 408576698