Chromium Code Reviews

Unified Diff: build/android/run_tests.py

Issue 10051021: apk-based test runner work for android (Closed) Base URL: svn://svn.chromium.org/chrome/trunk/src
Patch Set: Created 8 years, 8 months ago

 #!/usr/bin/env python
 # Copyright (c) 2012 The Chromium Authors. All rights reserved.
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.

 """Runs all the native unit tests.

 1. Copy over test binary to /data/local on device.
 2. Resources: chrome/unit_tests requires resources (chrome.pak and en-US.pak)
    to be deployed to the device (in /data/local/tmp).
(...skipping 59 matching lines...)
                 'content_unittests',
                 'gpu_unittests',
                 'ipc_tests',
                 'net_unittests',
                 'sql_unittests',
                 'sync_unit_tests',
                 'ui_unittests',
                ]


-def FullyQualifiedTestSuites():
-  """Return a fully qualified list that represents all known suites."""
+def FullyQualifiedTestSuites(apk):
+  """Return a fully qualified list that represents all known suites.
+
+  Args:
+    apk: if True, use the apk-based test runner"""
   # If not specified, assume the test suites are in out/Release
   test_suite_dir = os.path.abspath(os.path.join(run_tests_helper.CHROME_DIR,
                                                 'out', 'Release'))
-  return [os.path.join(test_suite_dir, t) for t in _TEST_SUITES]
+  if apk:
+    # out/Release/$SUITE_apk/ChromeNativeTests-debug.apk
+    suites = [os.path.join(test_suite_dir,
+                           t + '_apk',
+                           'ChromeNativeTests-debug.apk')
+              for t in _TEST_SUITES]
+  else:
+    suites = [os.path.join(test_suite_dir, t) for t in _TEST_SUITES]
+  return suites


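For reference, a minimal sketch (not part of the patch) of the suite paths the two branches of FullyQualifiedTestSuites() produce; CHROME_DIR is a stand-in for run_tests_helper.CHROME_DIR and the suite names come from _TEST_SUITES:

    import os

    CHROME_DIR = '/path/to/chromium/src'  # stand-in value
    test_suite_dir = os.path.join(CHROME_DIR, 'out', 'Release')

    for suite in ['net_unittests', 'ui_unittests']:
      # apk=True: out/Release/<suite>_apk/ChromeNativeTests-debug.apk
      print(os.path.join(test_suite_dir, suite + '_apk',
                         'ChromeNativeTests-debug.apk'))
      # apk=False: out/Release/<suite>
      print(os.path.join(test_suite_dir, suite))
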
 class TimeProfile(object):
   """Class for simple profiling of action, with logging of cost."""

   def __init__(self, description):
     self._description = description
     self.Start()

   def Start(self):
(...skipping 48 matching lines...)
     try:
       os.kill(self._pid, signal.SIGKILL)
     except:
       pass
     del os.environ['DISPLAY']
     self._pid = 0


 def RunTests(device, test_suite, gtest_filter, test_arguments, rebaseline,
              timeout, performance_test, cleanup_test_files, tool,
-             log_dump_name, annotate=False):
+             log_dump_name, apk, annotate=False):
   """Runs the tests.

   Args:
     device: Device to run the tests.
     test_suite: A specific test suite to run, empty to run all.
     gtest_filter: A gtest_filter flag.
     test_arguments: Additional arguments to pass to the test binary.
     rebaseline: Whether or not to run tests in isolation and update the filter.
     timeout: Timeout for each test.
     performance_test: Whether or not performance test(s).
     cleanup_test_files: Whether or not to cleanup test files on device.
     tool: Name of the Valgrind tool.
     log_dump_name: Name of log dump file.
+    apk: boolean to state if we are using the apk based test runner
     annotate: should we print buildbot-style annotations?

   Returns:
     A TestResults object.
   """
   results = []

   if test_suite:
     global _TEST_SUITES
-    if not os.path.exists(test_suite):
+    if (not os.path.exists(test_suite) and
+        not os.path.splitext(test_suite)[1] == '.apk'):
       logging.critical('Unrecognized test suite %s, supported: %s' %
                        (test_suite, _TEST_SUITES))
       if test_suite in _TEST_SUITES:
         logging.critical('(Remember to include the path: out/Release/%s)',
                          test_suite)
-      return TestResults.FromOkAndFailed([], [BaseTestResult(test_suite, '')])
+      return TestResults.FromOkAndFailed([], [BaseTestResult(test_suite, '')],
+                                         False, False)
     fully_qualified_test_suites = [test_suite]
   else:
-    fully_qualified_test_suites = FullyQualifiedTestSuites()
+    fully_qualified_test_suites = FullyQualifiedTestSuites(apk)
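As an aside (not part of the patch), the widened guard above lets a suite through either because its path exists locally or because it ends in '.apk'; a tiny illustration, with a made-up helper name and hypothetical paths:

    import os

    def _LooksRunnable(test_suite):
      # The patch rejects a suite when the path is missing AND it is not an
      # .apk; by De Morgan, acceptance is the disjunction below.
      return (os.path.exists(test_suite) or
              os.path.splitext(test_suite)[1] == '.apk')

    # A plain gtest binary has to be present on disk to pass.
    print(_LooksRunnable('out/Release/net_unittests'))
    # An apk-packaged suite is accepted on its extension alone.
    print(_LooksRunnable('out/Release/net_unittests_apk/ChromeNativeTests-debug.apk'))
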
   debug_info_list = []
   print 'Known suites: ' + str(_TEST_SUITES)
   print 'Running these: ' + str(fully_qualified_test_suites)
   for t in fully_qualified_test_suites:
     if annotate:
       print '@@@BUILD_STEP Test suite %s@@@' % os.path.basename(t)
     test = SingleTestRunner(device, t, gtest_filter, test_arguments,
                             timeout, rebaseline, performance_test,
                             cleanup_test_files, tool, 0, not not log_dump_name)
     test.Run()

     results += [test.test_results]
     # Collect debug info.
     debug_info_list += [test.dump_debug_info]
     if rebaseline:
       test.UpdateFilter(test.test_results.failed)
     elif test.test_results.failed:
       test.test_results.LogFull()
   # Zip all debug info outputs into a file named by log_dump_name.
   debug_info.GTestDebugInfo.ZipAndCleanResults(
       os.path.join(run_tests_helper.CHROME_DIR, 'out', 'Release',
                    'debug_info_dumps'),
       log_dump_name, [d for d in debug_info_list if d])

   if annotate:
     if test.test_results.timed_out:
       print '@@@STEP_WARNINGS@@@'
     elif test.test_results.failed:
       print '@@@STEP_FAILURE@@@'
+    elif test.test_results.overall_fail:
+      print '@@@STEP_FAILURE@@@'
     else:
       print 'Step success!'  # No annotation needed

   return TestResults.FromTestResults(results)


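For readers unfamiliar with buildbot annotations, a hedged sketch of what the annotate branch above emits per suite; the three flags are stand-ins for fields of test.test_results, and both a non-empty failed list and the new overall_fail flag map to the same STEP_FAILURE annotation:

    def PrintAnnotation(timed_out, failed, overall_fail):
      # Mirrors the if/elif chain in the diff above.
      if timed_out:
        print('@@@STEP_WARNINGS@@@')
      elif failed or overall_fail:
        print('@@@STEP_FAILURE@@@')
      else:
        print('Step success!')

    PrintAnnotation(timed_out=False, failed=[], overall_fail=True)  # @@@STEP_FAILURE@@@
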
 class TestSharder(BaseTestSharder):
   """Responsible for sharding the tests on the connected devices."""

   def __init__(self, attached_devices, test_suite, gtest_filter,
(...skipping 90 matching lines...)
                           options.performance_test,
                           options.cleanup_test_files, options.tool)
     test_results = sharder.RunShardedTests()
   else:
     test_results = RunTests(attached_devices[0], options.test_suite,
                             options.gtest_filter, options.test_arguments,
                             options.rebaseline, options.timeout,
                             options.performance_test,
                             options.cleanup_test_files, options.tool,
                             options.log_dump,
+                            options.apk,
                             annotate=options.annotate)

   for buildbot_emulator in buildbot_emulators:
     buildbot_emulator.Shutdown()

   # Another chance if we timed out? At this point It is safe(r) to
   # run fast and loose since we just uploaded all the test data and
   # binary.
   if test_results.timed_out and options.repeat:
     logging.critical('Timed out; repeating in fast_and_loose mode.')
(...skipping 20 matching lines...)
     ListTestSuites()
     return 0

   if options.use_xvfb:
     xvfb = Xvfb()
     xvfb.Start()

   if options.test_suite:
     all_test_suites = [options.test_suite]
   else:
-    all_test_suites = FullyQualifiedTestSuites()
+    all_test_suites = FullyQualifiedTestSuites(options.apk)
   failures = 0
   for suite in all_test_suites:
     options.test_suite = suite
     failures += _RunATestSuite(options)

   if options.use_xvfb:
     xvfb.Stop()
   if options.annotate:
     print '@@@BUILD_STEP Test Finished@@@'
   return failures
(...skipping 43 matching lines...)
                                 'tests that hang to add to the disabled list, '
                                 'there is no need to redeploy the test binary '
                                 'or data to the device again. '
                                 'Don\'t use on bots by default!')
   option_parser.add_option('--repeat', dest='repeat', type='int',
                            default=2,
                            help='Repeat count on test timeout')
   option_parser.add_option('--annotate', default=True,
                            help='Print buildbot-style annotate messages '
                                 'for each test suite. Default=True')
+  option_parser.add_option('--apk', default=False,
+                           help='Use the apk test runner '
+                                '(off by default for now)')
   options, args = option_parser.parse_args(argv)
   if len(args) > 1:
     print 'Unknown argument:', args[1:]
     option_parser.print_usage()
     sys.exit(1)
   run_tests_helper.SetLogLevel(options.verbose_count)
   return Dispatch(options)


 if __name__ == '__main__':
   sys.exit(main(sys.argv))
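A note on the new --apk option (illustrative, not part of the patch): it follows the existing --annotate pattern, and because no action is given, optparse's default 'store' action means --apk expects a value on the command line and stores it as a string. If a bare on/off switch is the intent, the usual optparse idiom is a store_true flag, sketched here with a hypothetical parser:

    import optparse

    parser = optparse.OptionParser()
    # Boolean switch: present on the command line -> True, absent -> False.
    parser.add_option('--apk', action='store_true', default=False,
                      help='Use the apk-based test runner')

    options, _ = parser.parse_args(['--apk'])
    print(options.apk)  # True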