Chromium Code Reviews

Unified diff: build/android/run_tests.py

Issue 10777017: Android: further simplification for test runners. (Closed) | Base URL: svn://svn.chromium.org/chrome/trunk/src
Patch Set: Nit on comment (created 8 years, 5 months ago)
 #!/usr/bin/env python
 #
 # Copyright (c) 2012 The Chromium Authors. All rights reserved.
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.
 
 """Runs all the native unit tests.
 
 1. Copy over test binary to /data/local on device.
 2. Resources: chrome/unit_tests requires resources (chrome.pak and en-US.pak)
(...skipping 62 matching lines...)
     'content_unittests',
     'gpu_unittests',
     'ipc_tests',
     'net_unittests',
     'sql_unittests',
     'sync_unit_tests',
     'ui_unittests',
 ]
 
 
-def FullyQualifiedTestSuites(exe, test_suites):
-  """Return a fully qualified list that represents all known suites.
+def FullyQualifiedTestSuites(exe, option_test_suite):
+  """Return a fully qualified list
 
   Args:
     exe: if True, use the executable-based test runner.
-    test_suites: the source test suites to process.
+    option_test_suite: the test_suite specified as an option.
   """
   # Assume the test suites are in out/Release.
   test_suite_dir = os.path.abspath(os.path.join(constants.CHROME_DIR,
                                                 'out', 'Release'))
+  if option_test_suite:
+    all_test_suites = [option_test_suite]
+  else:
+    all_test_suites = _TEST_SUITES
+
   if exe:
-    suites = [os.path.join(test_suite_dir, t) for t in test_suites]
+    qualified_test_suites = [os.path.join(test_suite_dir, t)
+                             for t in all_test_suites]
   else:
     # out/Release/$SUITE_apk/$SUITE-debug.apk
-    suites = [os.path.join(test_suite_dir,
-                           t + '_apk',
-                           t + '-debug.apk')
-              for t in test_suites]
-  return suites
+    qualified_test_suites = [os.path.join(test_suite_dir,
+                                          t + '_apk',
+                                          t + '-debug.apk')
+                             for t in all_test_suites]
+  for t, q in zip(all_test_suites, qualified_test_suites):
+    if not os.path.exists(q):
+      logging.critical('Test suite %s not found in %s.\n'
+                       'Supported test suites:\n %s\n'
+                       'Ensure it has been built.\n',
+                       t, q, _TEST_SUITES)
+      return []
+  return qualified_test_suites
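To make the new behavior concrete: with no --test_suite option the function now expands every entry of _TEST_SUITES; with one, it qualifies just that suite, and a missing binary short-circuits to an empty list instead of failing later. A minimal standalone sketch of the APK path construction (the out/Release layout comes from the comment in the diff; the helper name is mine):

    import os

    def apk_path(out_dir, suite):
        # out/Release/$SUITE_apk/$SUITE-debug.apk, per the comment above.
        return os.path.join(out_dir, suite + '_apk', suite + '-debug.apk')

    print(apk_path('out/Release', 'net_unittests'))
    # -> out/Release/net_unittests_apk/net_unittests-debug.apk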
 
 
 class TimeProfile(object):
   """Class for simple profiling of action, with logging of cost."""
 
   def __init__(self, description):
     self._description = description
     self.Start()
 
   def Start(self):
(...skipping 56 matching lines...)
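TimeProfile's remaining methods are elided above. A minimal standalone timer in the same spirit, assuming the class logs elapsed time when stopped (this sketch is mine, not the elided code):

    import logging
    import time

    logging.basicConfig(level=logging.INFO)

    class SimpleTimer(object):
        """Stand-in sketch for TimeProfile; logs the cost of an action."""

        def __init__(self, description):
            self._description = description
            self.Start()

        def Start(self):
            self._start = time.time()

        def Stop(self):
            # Log how long the described action took.
            logging.info('%s took %.2fs', self._description,
                         time.time() - self._start)

    t = SimpleTimer('Emulator launch 0')
    t.Stop()  # e.g. "Emulator launch 0 took 0.00s"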
 
 def PrintAnnotationForTestResults(test_results):
   if test_results.timed_out:
     buildbot_report.PrintWarning()
   elif test_results.failed or test_results.crashed or test_results.overall_fail:
     buildbot_report.PrintError()
   else:
     print 'Step success!'  # No annotation needed
 
 
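PrintWarning and PrintError come from buildbot_report, which this CL does not touch. On Chromium bots such helpers work by printing annotator tags to stdout; a sketch of the assumed shape (the tag strings are my assumption, not shown in this diff):

    # Assumed shape of the buildbot_report helpers used above:
    def PrintWarning():
        print('@@@STEP_WARNINGS@@@')  # annotator marks the current step orange

    def PrintError():
        print('@@@STEP_FAILURE@@@')   # annotator marks the current step red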
-def RunTests(exe, device, test_suite, gtest_filter, test_arguments, rebaseline,
-             timeout, performance_test, cleanup_test_files, tool,
-             log_dump_name, fast_and_loose):
-  """Runs the tests.
-
-  Args:
-    exe: boolean to state if we are using the exe based test runner
-    device: Device to run the tests.
-    test_suite: A specific test suite to run, empty to run all.
-    gtest_filter: A gtest_filter flag.
-    test_arguments: Additional arguments to pass to the test binary.
-    rebaseline: Whether or not to run tests in isolation and update the filter.
-    timeout: Timeout for each test.
-    performance_test: Whether or not performance test(s).
-    cleanup_test_files: Whether or not to cleanup test files on device.
-    tool: Name of the Valgrind tool.
-    log_dump_name: Name of log dump file.
-    fast_and_loose: if set, skip copying data files.
-
-  Returns:
-    A TestResults object.
-  """
-  results = []
-
-  if test_suite:
-    if not os.path.exists(test_suite):
-      logging.critical('Unrecognized test suite %s, supported: %s',
-                       test_suite, _TEST_SUITES)
-      if test_suite in _TEST_SUITES:
-        logging.critical('(Remember to include the path: out/Release/%s)',
-                         test_suite)
-      test_suite_basename = os.path.basename(test_suite)
-      if test_suite_basename in _TEST_SUITES:
-        logging.critical('Try "make -j15 %s"', test_suite_basename)
-      else:
-        logging.critical('Unrecognized test suite, supported: %s',
-                         _TEST_SUITES)
-      return TestResults.FromRun([], [BaseTestResult(test_suite, '')],
-                                 False, False)
-    fully_qualified_test_suites = [test_suite]
-  else:
-    fully_qualified_test_suites = FullyQualifiedTestSuites(exe, _TEST_SUITES)
-  debug_info_list = []
-  print 'Known suites: ' + str(_TEST_SUITES)
-  print 'Running these: ' + str(fully_qualified_test_suites)
-  for t in fully_qualified_test_suites:
-    buildbot_report.PrintNamedStep('Test suite %s' % os.path.basename(t))
-    test = SingleTestRunner(device, t, gtest_filter, test_arguments,
-                            timeout, rebaseline, performance_test,
-                            cleanup_test_files, tool, 0, not not log_dump_name,
-                            fast_and_loose)
-    test.Run()
-
-    results += [test.test_results]
-    # Collect debug info.
-    debug_info_list += [test.dump_debug_info]
-    if rebaseline:
-      test.UpdateFilter(test.test_results.failed)
-    test.test_results.LogFull('Unit test', os.path.basename(t))
-  # Zip all debug info outputs into a file named by log_dump_name.
-  debug_info.GTestDebugInfo.ZipAndCleanResults(
-      os.path.join(constants.CHROME_DIR, 'out', 'Release', 'debug_info_dumps'),
-      log_dump_name, [d for d in debug_info_list if d])
-
-  PrintAnnotationForTestResults(test.test_results)
-
-  return TestResults.FromTestResults(results)
-
-
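A side note on the SingleTestRunner calls in the removed function and in TestSharder below: `not not x` is a terse way to coerce any value to a boolean, equivalent to `bool(x)`:

    log_dump_name = 'dump.log'
    assert (not not log_dump_name) is True   # same as bool(log_dump_name)
    assert (not not '') is False             # empty string coerces to False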
 class TestSharder(BaseTestSharder):
   """Responsible for sharding the tests on the connected devices."""
 
   def __init__(self, attached_devices, test_suite, gtest_filter,
                test_arguments, timeout, rebaseline, performance_test,
                cleanup_test_files, tool, log_dump_name, fast_and_loose):
     BaseTestSharder.__init__(self, attached_devices)
     self.test_suite = test_suite
     self.test_suite_basename = os.path.basename(test_suite)
-    self.gtest_filter = gtest_filter
+    self.gtest_filter = gtest_filter or ''
     self.test_arguments = test_arguments
     self.timeout = timeout
     self.rebaseline = rebaseline
     self.performance_test = performance_test
     self.cleanup_test_files = cleanup_test_files
     self.tool = tool
     self.log_dump_name = log_dump_name
     self.fast_and_loose = fast_and_loose
     test = SingleTestRunner(self.attached_devices[0], test_suite, gtest_filter,
                             test_arguments, timeout, rebaseline,
                             performance_test, cleanup_test_files, tool, 0,
                             not not self.log_dump_name, fast_and_loose)
-    # The executable/apk needs to be copied before we can call GetAllTests.
-    test.test_package.StripAndCopyExecutable()
-    all_tests = test.test_package.GetAllTests()
-    if not rebaseline:
-      disabled_list = test.GetDisabledTests()
-      # Only includes tests that do not have any match in the disabled list.
-      all_tests = filter(lambda t:
-                         not any([fnmatch.fnmatch(t, disabled_pattern)
-                                  for disabled_pattern in disabled_list]),
-                         all_tests)
-    self.tests = all_tests
+    self.tests = []
+    if not self.gtest_filter:
+      # No filter has been specified, let's add all tests then.
+      # The executable/apk needs to be copied before we can call GetAllTests.
+      test.test_package.StripAndCopyExecutable()
+      all_tests = test.test_package.GetAllTests()
+      if not rebaseline:
+        disabled_list = test.GetDisabledTests()
+        # Only includes tests that do not have any match in the disabled list.
+        all_tests = filter(lambda t:
+                           not any([fnmatch.fnmatch(t, disabled_pattern)
+                                    for disabled_pattern in disabled_list]),
+                           all_tests)
+      self.tests = all_tests
 
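The disabled-list pruning above matches fnmatch-style glob patterns against gtest names. A self-contained sketch of the same filtering technique (test names and patterns are made up for illustration):

    import fnmatch

    all_tests = ['FooTest.Bar', 'FooTest.Slow', 'NetTest.Basic']
    disabled = ['*.Slow', 'NetTest.*']
    kept = [t for t in all_tests
            if not any(fnmatch.fnmatch(t, pat) for pat in disabled)]
    print(kept)  # -> ['FooTest.Bar']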
   def CreateShardedTestRunner(self, device, index):
     """Creates a suite-specific test runner.
 
     Args:
       device: Device serial where this shard will run.
       index: Index of this device in the pool.
 
     Returns:
       A SingleTestRunner object.
     """
     device_num = len(self.attached_devices)
     shard_size = (len(self.tests) + device_num - 1) / device_num
     shard_test_list = self.tests[index * shard_size : (index + 1) * shard_size]
-    test_filter = ':'.join(shard_test_list)
+    test_filter = ':'.join(shard_test_list) + self.gtest_filter
     return SingleTestRunner(device, self.test_suite,
                             test_filter, self.test_arguments, self.timeout,
                             self.rebaseline, self.performance_test,
                             self.cleanup_test_files, self.tool, index,
                             not not self.log_dump_name, self.fast_and_loose)
 
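The shard computation uses integer ceiling division, so the first shards get `shard_size` tests each and the last shard gets the remainder; every test lands in exactly one shard. A quick standalone check (// keeps the math correct on Python 3 as well; the 2012 code relied on Python 2's integer /):

    def shard(tests, device_num):
        # Ceiling division, as in CreateShardedTestRunner above.
        shard_size = (len(tests) + device_num - 1) // device_num
        return [tests[i * shard_size:(i + 1) * shard_size]
                for i in range(device_num)]

    print(shard(['t%d' % i for i in range(7)], 3))
    # -> [['t0', 't1', 't2'], ['t3', 't4', 't5'], ['t6']]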
   def OnTestsCompleted(self, test_runners, test_results):
     """Notifies that we completed the tests."""
     test_results.LogFull('Unit test', os.path.basename(self.test_suite))
     PrintAnnotationForTestResults(test_results)
     if test_results.failed and self.rebaseline:
       test_runners[0].UpdateFilter(test_results.failed)
+    if self.log_dump_name:
+      # Zip all debug info outputs into a file named by log_dump_name.
+      debug_info.GTestDebugInfo.ZipAndCleanResults(
+          os.path.join(constants.CHROME_DIR, 'out', 'Release',
+                       'debug_info_dumps'),
+          self.log_dump_name)
 
 
 def _RunATestSuite(options):
   """Run a single test suite.
 
   Helper for Dispatch() to allow stop/restart of the emulator across
   test bundles. If using the emulator, we start it on entry and stop
   it on exit.
 
   Args:
     options: options for running the tests.
 
   Returns:
     0 if successful, number of failing tests otherwise.
   """
+  buildbot_report.PrintNamedStep('Test suite %s' % options.test_suite)
   attached_devices = []
   buildbot_emulators = []
 
   if options.use_emulator:
     for n in range(options.emulator_count):
       t = TimeProfile('Emulator launch %d' % n)
       avd_name = None
       if n > 0:
         # Creates a temporary AVD for the extra emulators.
         avd_name = 'run_tests_avd_%d' % n
(...skipping 13 matching lines...)
   if not attached_devices:
     logging.critical('A device must be attached and online.')
     buildbot_report.PrintError()
     return 1
 
   # Reset the test port allocation. It's important to do it before starting
   # to dispatch any tests.
   if not ports.ResetTestServerPortAllocation():
     raise Exception('Failed to reset test server port.')
 
-  if (len(attached_devices) > 1 and options.test_suite and
-      not options.gtest_filter and not options.performance_test):
-    sharder = TestSharder(attached_devices, options.test_suite,
-                          options.gtest_filter, options.test_arguments,
-                          options.timeout, options.rebaseline,
-                          options.performance_test,
-                          options.cleanup_test_files, options.tool,
-                          options.log_dump, options.fast_and_loose)
-    test_results = sharder.RunShardedTests()
-  else:
-    test_results = RunTests(options.exe, attached_devices[0],
-                            options.test_suite,
-                            options.gtest_filter, options.test_arguments,
-                            options.rebaseline, options.timeout,
-                            options.performance_test,
-                            options.cleanup_test_files, options.tool,
-                            options.log_dump, options.fast_and_loose)
+  if options.performance_test or options.gtest_filter:
+    # These configuration can't be split in multiple devices.
+    attached_devices = [attached_devices[0]]
+  sharder = TestSharder(attached_devices, options.test_suite,
nilesh 2012/07/17 18:14:33 We are now using TestSharder in all cases. Howeve
+                        options.gtest_filter, options.test_arguments,
+                        options.timeout, options.rebaseline,
+                        options.performance_test,
+                        options.cleanup_test_files, options.tool,
+                        options.log_dump, options.fast_and_loose)
+  test_results = sharder.RunShardedTests()
 
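With RunTests gone, every configuration flows through TestSharder; runs that can't be split (performance tests, explicit gtest filters) are simply clamped to a single device first. A minimal sketch of that clamping rule (the helper name is mine):

    def clamp_devices(devices, performance_test, gtest_filter):
        # Mirror of the new logic above: unsplittable runs keep one device.
        if performance_test or gtest_filter:
            return devices[:1]
        return devices

    print(clamp_devices(['emu-0', 'emu-1'], False, 'FooTest.*'))  # -> ['emu-0']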
   for buildbot_emulator in buildbot_emulators:
     buildbot_emulator.Shutdown()
 
   # Another chance if we timed out? At this point It is safe(r) to
   # run fast and loose since we just uploaded all the test data and
   # binary.
   if test_results.timed_out and options.repeat:
     logging.critical('Timed out; repeating in fast_and_loose mode.')
     options.fast_and_loose = True
(...skipping 16 matching lines...)
     0 if successful, number of failing tests otherwise.
   """
   if options.test_suite == 'help':
     ListTestSuites()
     return 0
 
   if options.use_xvfb:
     xvfb = Xvfb()
     xvfb.Start()
 
-  if options.test_suite:
-    all_test_suites = FullyQualifiedTestSuites(options.exe,
-                                               [options.test_suite])
-  else:
-    all_test_suites = FullyQualifiedTestSuites(options.exe,
-                                               _TEST_SUITES)
+  all_test_suites = FullyQualifiedTestSuites(options.exe, options.test_suite)
   failures = 0
   for suite in all_test_suites:
     options.test_suite = suite
     failures += _RunATestSuite(options)
 
   if options.use_xvfb:
     xvfb.Stop()
   return failures
 
 
(...skipping 16 matching lines...)
                            help='Rebaseline and update *testsuite_disabled',
                            action='store_true')
   option_parser.add_option('-f', '--gtest_filter', dest='gtest_filter',
                            help='gtest filter')
   option_parser.add_option('-a', '--test_arguments', dest='test_arguments',
                            help='Additional arguments to pass to the test')
   option_parser.add_option('-p', dest='performance_test',
                            help='Indicator of performance test',
                            action='store_true')
   option_parser.add_option('-L', dest='log_dump',
-                           help='file name of log dump, which will be put in'
-                                'subfolder debug_info_dumps under the same directory'
-                                'in where the test_suite exists.')
+                           help='file name of log dump, which will be put in '
+                                'subfolder debug_info_dumps under the same '
+                                'directory in where the test_suite exists.')
   option_parser.add_option('-e', '--emulator', dest='use_emulator',
                            action='store_true',
                            help='Run tests in a new instance of emulator')
   option_parser.add_option('-n', '--emulator_count',
                            type='int', default=1,
                            help='Number of emulators to launch for running the '
                                 'tests.')
   option_parser.add_option('-x', '--xvfb', dest='use_xvfb',
                            action='store_true',
                            help='Use Xvfb around tests (ignored if not Linux)')
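The -L help-string edit above fixes a classic gotcha: adjacent Python string literals concatenate with no separator, so the old text rendered as '...put insubfolder...'. A minimal illustration:

    old = ('which will be put in'
           'subfolder debug_info_dumps')   # words run together: '...put insubfolder...'
    new = ('which will be put in '
           'subfolder debug_info_dumps')   # '...put in subfolder...'
    print(old)
    print(new)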
(...skipping 29 matching lines...)
   # the batch (this happens because the exit status is a sum of all failures
   # from all suites, but the buildbot associates the exit status only with the
   # most recent step).
   if options.exit_code:
     return failed_tests_count
   return 0
 
 
 if __name__ == '__main__':
   sys.exit(main(sys.argv))
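A caveat on summing failures into the exit status, related to the comment above about the buildbot seeing only the last step's status: POSIX keeps just the low 8 bits of an exit code, so exactly 256 failures would read back as success. A hedged sketch of a guard (not part of this CL):

    import sys

    def exit_with_failures(failed_tests_count):
        # Hypothetical guard: cap at 255 so a multiple of 256 never looks like 0.
        sys.exit(min(failed_tests_count, 255))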