Chromium Code Reviews

Unified diff: build/android/run_tests.py

Issue 9185043: Increase Android test robustness. (Closed) Base URL: svn://svn.chromium.org/chrome/trunk/src
Patch Set: Remove 'ALWAYS' (created 8 years, 11 months ago)
 #!/usr/bin/env python
 # Copyright (c) 2011 The Chromium Authors. All rights reserved.
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.

 """Runs all the native unit tests.

 1. Copy over test binary to /data/local on device.
 2. Resources: chrome/unit_tests requires resources (chrome.pak and en-US.pak)
    to be deployed to the device (in /data/local/tmp).
(...skipping 114 matching lines...)
     try:
       os.kill(self._pid, signal.SIGKILL)
     except:
       pass
     del os.environ['DISPLAY']
     self._pid = 0


 def RunTests(device, test_suite, gtest_filter, test_arguments, rebaseline,
              timeout, performance_test, cleanup_test_files, tool,
-             log_dump_name):
+             log_dump_name, fast_and_loose=False):
   """Runs the tests.

   Args:
     device: Device to run the tests.
     test_suite: A specific test suite to run, empty to run all.
     gtest_filter: A gtest_filter flag.
     test_arguments: Additional arguments to pass to the test binary.
     rebaseline: Whether or not to run tests in isolation and update the filter.
     timeout: Timeout for each test.
     performance_test: Whether or not performance test(s).
     cleanup_test_files: Whether or not to cleanup test files on device.
     tool: Name of the Valgrind tool.
     log_dump_name: Name of log dump file.
+    fast_and_loose: should we go extra-fast but sacrifice stability
+      and/or correctness? Intended for quick cycle testing; not for bots!

   Returns:
     A TestResults object.
   """
   results = []

   if test_suite:
     global _TEST_SUITES
     if not os.path.exists(test_suite):
       logging.critical('Unrecognized test suite, supported: %s' %
                        _TEST_SUITES)
       if test_suite in _TEST_SUITES:
         logging.critical('(Remember to include the path: out/Release/%s)',
                          test_suite)
       return TestResults.FromOkAndFailed([], [BaseTestResult(test_suite, '')])
     _TEST_SUITES = [test_suite]
   else:
     # If not specified, assume the test suites are in out/Release
     test_suite_dir = os.path.abspath(os.path.join(run_tests_helper.CHROME_DIR,
                                                   'out', 'Release'))
     _TEST_SUITES = [os.path.join(test_suite_dir, t) for t in _TEST_SUITES]
   debug_info_list = []
   print _TEST_SUITES  # So it shows up in buildbot output
   for t in _TEST_SUITES:
     test = SingleTestRunner(device, t, gtest_filter, test_arguments,
                             timeout, rebaseline, performance_test,
-                            cleanup_test_files, tool, not not log_dump_name)
+                            cleanup_test_files, tool, not not log_dump_name,
+                            fast_and_loose=fast_and_loose)
     test.RunTests()
     results += [test.test_results]
     # Collect debug info.
     debug_info_list += [test.dump_debug_info]
     if rebaseline:
       test.UpdateFilter(test.test_results.failed)
     elif test.test_results.failed:
       # Stop running test if encountering failed test.
       test.test_results.LogFull()
       break
   # Zip all debug info outputs into a file named by log_dump_name.
   debug_info.GTestDebugInfo.ZipAndCleanResults(
       os.path.join(run_tests_helper.CHROME_DIR, 'out', 'Release',
                    'debug_info_dumps'),
       log_dump_name, [d for d in debug_info_list if d])
   return TestResults.FromTestResults(results)

+
 def Dispatch(options):
   """Dispatches the tests, sharding if possible.

   If options.use_emulator is True, all tests will be run in a new emulator
   instance.

   Args:
     options: options for running the tests.

   Returns:
     0 if successful, number of failing tests otherwise.
   """
   if options.test_suite == 'help':
     ListTestSuites()
     return 0
   buildbot_emulator = None
   attached_devices = []

   if options.use_xvfb:
     xvfb = Xvfb()
     xvfb.Start()

   if options.use_emulator:
     t = TimeProfile('Emulator launch')
-    buildbot_emulator = emulator.Emulator()
+    buildbot_emulator = emulator.Emulator(options.fast_and_loose)
     buildbot_emulator.Launch()
     t.Stop()
     attached_devices.append(buildbot_emulator.device)
   else:
     attached_devices = android_commands.GetAttachedDevices()

   if not attached_devices:
     logging.critical('A device must be attached and online.')
     return 1

   test_results = RunTests(attached_devices[0], options.test_suite,
                           options.gtest_filter, options.test_arguments,
                           options.rebaseline, options.timeout,
                           options.performance_test,
                           options.cleanup_test_files, options.tool,
-                          options.log_dump)
+                          options.log_dump,
+                          fast_and_loose=options.fast_and_loose)
   if buildbot_emulator:
     buildbot_emulator.Shutdown()
   if options.use_xvfb:
     xvfb.Stop()

-  return len(test_results.failed)
+  # Another chance if we timed out? At this point It is safe(r) to
+  # run fast and loose since we just uploaded all the test data and
+  # binary.
+  if test_results.timed_out and options.repeat:
+    logging.critical('Timed out; repeating in fast_and_loose mode.')
+    options.fast_and_loose = True
+    options.repeat = options.repeat - 1
+    logging.critical('Repeats left: ' + str(options.repeat))
+    return Dispatch(options)
+  else:
+    return len(test_results.failed)
+

 def ListTestSuites():
   """Display a list of available test suites
   """
   print 'Available test suites are:'
   for test_suite in _TEST_SUITES:
     print test_suite


 def main(argv):
   option_parser = run_tests_helper.CreateTestRunnerOptionParser(None,
                                                                 default_timeout=0)
   option_parser.add_option('-s', dest='test_suite',
                            help='Executable name of the test suite to run '
                            '(use -s help to list them)')
   option_parser.add_option('-r', dest='rebaseline',
                            help='Rebaseline and update *testsuite_disabled',
                            action='store_true',
                            default=False)
-  option_parser.add_option('-f', dest='gtest_filter',
+  option_parser.add_option('-f', '--gtest_filter', dest='gtest_filter',
                            help='gtest filter')
   option_parser.add_option('-a', '--test_arguments', dest='test_arguments',
                            help='Additional arguments to pass to the test')
   option_parser.add_option('-p', dest='performance_test',
                            help='Indicator of performance test',
                            action='store_true',
                            default=False)
   option_parser.add_option('-L', dest='log_dump',
                            help='file name of log dump, which will be put in'
                            'subfolder debug_info_dumps under the same directory'
                            'in where the test_suite exists.')
   option_parser.add_option('-e', '--emulator', dest='use_emulator',
                            help='Run tests in a new instance of emulator',
                            action='store_true',
                            default=False)
   option_parser.add_option('-x', '--xvfb', dest='use_xvfb',
                            action='store_true', default=False,
                            help='Use Xvfb around tests (ignored if not Linux)')
+  option_parser.add_option('--fast', '--fast_and_loose', dest='fast_and_loose',
+                           action='store_true', default=False,
+                           help='Go faster (but be less stable), '
+                           'for quick testing. Example: when tracking down '
+                           'tests that hang to add to the disabled list, '
+                           'there is no need to redeploy the test binary '
+                           'or data to the device again. '
+                           'Don\'t use on bots by default!')
+  option_parser.add_option('--repeat', dest='repeat', type='int',
+                           default=2,
+                           help='Repeat count on test timeout')
   options, args = option_parser.parse_args(argv)
   if len(args) > 1:
     print 'Unknown argument:', args[1:]
     option_parser.print_usage()
     sys.exit(1)
   run_tests_helper.SetLogLevel(options.verbose_count)
   return Dispatch(options)


 if __name__ == '__main__':
   sys.exit(main(sys.argv))
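To make the behaviour change easier to read in isolation, here is a minimal, self-contained sketch of the retry-on-timeout flow that the patched Dispatch() implements. Every name here (dispatch, run_once, FakeResults, FakeOptions) is a hypothetical stand-in for illustration, not code from run_tests.py.

# Illustrative sketch (not part of the patch): the retry-on-timeout flow that
# the new Dispatch() code implements. 'dispatch', 'run_once', 'FakeResults'
# and 'FakeOptions' are hypothetical stand-ins, not names from run_tests.py.
import logging


class FakeResults(object):
  def __init__(self, failed, timed_out):
    self.failed = failed        # list of failed test names
    self.timed_out = timed_out  # True if the run hit the timeout


def dispatch(options, run_once):
  """Run once; on timeout, retry in fast_and_loose mode while repeats remain."""
  results = run_once(options)
  if results.timed_out and options.repeat:
    logging.critical('Timed out; repeating in fast_and_loose mode.')
    options.fast_and_loose = True  # test data/binary are already on the device
    options.repeat -= 1
    logging.critical('Repeats left: %d', options.repeat)
    return dispatch(options, run_once)
  return len(results.failed)


class FakeOptions(object):
  fast_and_loose = False
  repeat = 2

# A run that times out once, then completes with one failure on the retry.
attempts = iter([FakeResults([], timed_out=True),
                 FakeResults(['FooTest.Bar'], timed_out=False)])
print(dispatch(FakeOptions(), lambda options: next(attempts)))  # prints 1

With the default --repeat=2, a run that keeps timing out is attempted at most three times, and each retry runs with fast_and_loose forced on, so the already-uploaded test binary and data are not redeployed.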
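Separately, the two new command-line options can be exercised on their own. The snippet below is only a standalone reproduction of the two add_option() calls the patch adds (the rest of the real parser is omitted); it shows that --fast and --fast_and_loose set the same flag and that --repeat defaults to 2.

# Minimal, standalone reproduction of only the two options the patch adds;
# the rest of the real option parser is omitted.
import optparse

parser = optparse.OptionParser()
parser.add_option('--fast', '--fast_and_loose', dest='fast_and_loose',
                  action='store_true', default=False,
                  help='Go faster (but be less stable), for quick testing.')
parser.add_option('--repeat', dest='repeat', type='int', default=2,
                  help='Repeat count on test timeout')

options, _ = parser.parse_args(['--fast'])
print(options.fast_and_loose)  # True: --fast is an alias for --fast_and_loose
print(options.repeat)          # 2: up to two timeout retries by default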
