Chromium Code Reviews

| OLD | NEW |
|---|---|
| 1 #!/usr/bin/env python | 1 #!/usr/bin/env python |
| 2 # | 2 # |
| 3 # Copyright (c) 2012 The Chromium Authors. All rights reserved. | 3 # Copyright (c) 2012 The Chromium Authors. All rights reserved. |
| 4 # Use of this source code is governed by a BSD-style license that can be | 4 # Use of this source code is governed by a BSD-style license that can be |
| 5 # found in the LICENSE file. | 5 # found in the LICENSE file. |
| 6 | 6 |
| 7 """Runs all the native unit tests. | 7 """Runs all the native unit tests. |
| 8 | 8 |
| 9 1. Copy over test binary to /data/local on device. | 9 1. Copy over test binary to /data/local on device. |
| 10 2. Resources: chrome/unit_tests requires resources (chrome.pak and en-US.pak) | 10 2. Resources: chrome/unit_tests requires resources (chrome.pak and en-US.pak) |
| 11 to be deployed to the device. We use the device's $EXTERNAL_STORAGE as the | 11 to be deployed to the device. We use the device's $EXTERNAL_STORAGE as the |
| 12 base dir (which maps to Context.getExternalFilesDir()). | 12 base dir (which maps to Context.getExternalFilesDir()). |
| 13 3. Environment: | 13 3. Environment: |
| 14 3.1. chrome/unit_tests requires (via chrome_paths.cc) a directory named: | 14 3.1. chrome/unit_tests requires (via chrome_paths.cc) a directory named: |
| 15 $EXTERNAL_STORAGE + /chrome/test/data | 15 $EXTERNAL_STORAGE + /chrome/test/data |
| 16 3.2. page_cycler_tests have the following requirements: | 16 3.2. page_cycler_tests have the following requirements: |
| 17 3.2.1 the following data on host: | 17 3.2.1 the following data on host: |
| 18 <chrome_src_dir>/tools/page_cycler | 18 <chrome_src_dir>/tools/page_cycler |
| 19 <chrome_src_dir>/data/page_cycler | 19 <chrome_src_dir>/data/page_cycler |
| 20 3.2.2. two data directories to store above test data on device named: | 20 3.2.2. two data directories to store above test data on device named: |
| 21 $EXTERNAL_STORAGE + /tools/ (for database perf test) | 21 $EXTERNAL_STORAGE + /tools/ (for database perf test) |
| 22 $EXTERNAL_STORAGE + /data/ (for other perf tests) | 22 $EXTERNAL_STORAGE + /data/ (for other perf tests) |
| 23 3.2.3. an http server to serve http perf tests. | 23 3.2.3. an http server to serve http perf tests. |
| 24 The http root is host's <chrome_src_dir>/data/page_cycler/, port 8000. | 24 The http root is host's <chrome_src_dir>/data/page_cycler/, port 8000. |
| 25 3.2.4 a tool named forwarder must also run on the device to | 25 3.2.4 a tool named forwarder must also run on the device to |
| 26 forward http requests and responses between host and device. | 26 forward http requests and responses between host and device. |
| 27 3.2.5 Chrome is installed on device. | 27 3.2.5 Chrome is installed on device. |
| 28 4. Run the binary on the device and stream the log to the host. | 28 4. Run the binary on the device and stream the log to the host. |
| 29 4.1. Optionally, filter specific tests. | 29 4.1. Optionally, filter specific tests. |
| 30 4.2. Optionally, rebaseline: run the available tests and update the | 30 4.2. If we're running a single test suite and we have multiple devices |
| 31 suppressions file for failures. | |
| 32 4.3. If we're running a single test suite and we have multiple devices | |
| 33 connected, we'll shard the tests. | 31 connected, we'll shard the tests. |
| 34 5. Clean up the device. | 32 5. Clean up the device. |
| 35 | 33 |
| 36 Suppressions: | 34 Suppressions: |
| 37 | 35 |
| 38 Individual tests in a test binary can be suppressed by listing it in | 36 Individual tests in a test binary can be suppressed by listing it in |
| 39 the gtest_filter directory in a file of the same name as the test binary, | 37 the gtest_filter directory in a file of the same name as the test binary, |
| 40 one test per line. Here is an example: | 38 one test per line. Here is an example: |
| 41 | 39 |
| 42 $ cat gtest_filter/base_unittests_disabled | 40 $ cat gtest_filter/base_unittests_disabled |
| 43 DataPackTest.Load | 41 DataPackTest.Load |
| 44 ReadOnlyFileUtilTest.ContentsEqual | 42 ReadOnlyFileUtilTest.ContentsEqual |
| 45 | 43 |
| 46 This file is generated by the tests running on devices. If running on an | 44 This file is generated by the tests running on devices. If running on an |
| 47 emulator, an additional filter file listing the tests that fail only on the | 45 emulator, an additional filter file listing the tests that fail only on the |
| 48 emulator is also loaded. We don't care about the rare test cases that succeed | 46 emulator is also loaded. We don't care about the rare test cases that succeed |
| 49 on the emulator but fail on a device. | 47 on the emulator but fail on a device. |
| 50 """ | 48 """ |
| 51 | 49 |
| 52 import copy | 50 import copy |
| 53 import fnmatch | 51 import fnmatch |
| 54 import logging | 52 import logging |
| 55 import optparse | 53 import optparse |
| 56 import os | 54 import os |
| 57 import signal | 55 import signal |
| 58 import subprocess | 56 import subprocess |
| 59 import sys | 57 import sys |
| 60 import time | 58 import time |
| 61 | 59 |
| 60 import emulator | |
| 62 from pylib import android_commands | 61 from pylib import android_commands |
| 63 from pylib.base_test_sharder import BaseTestSharder | |
| 64 from pylib import buildbot_report | 62 from pylib import buildbot_report |
| 65 from pylib import cmd_helper | 63 from pylib import cmd_helper |
| 66 from pylib import constants | |
| 67 from pylib import debug_info | 64 from pylib import debug_info |
| 68 import emulator | |
| 69 from pylib import ports | 65 from pylib import ports |
| 70 from pylib import run_tests_helper | 66 from pylib import run_tests_helper |
| 71 from pylib import test_options_parser | 67 from pylib import test_options_parser |
| 68 from pylib.base_test_sharder import BaseTestSharder | |
| 72 from pylib.single_test_runner import SingleTestRunner | 69 from pylib.single_test_runner import SingleTestRunner |
| 73 from pylib.test_result import BaseTestResult, TestResults | |
| 74 | 70 |
| 75 | 71 |
| 76 _TEST_SUITES = ['base_unittests', | 72 _TEST_SUITES = ['base_unittests', |
| 77 'cc_unittests', | 73 'cc_unittests', |
| 78 'content_unittests', | 74 'content_unittests', |
| 79 'gpu_unittests', | 75 'gpu_unittests', |
| 80 'ipc_tests', | 76 'ipc_tests', |
| 81 'media_unittests', | 77 'media_unittests', |
| 82 'net_unittests', | 78 'net_unittests', |
| 83 'sql_unittests', | 79 'sql_unittests', |
| 84 'sync_unit_tests', | 80 'sync_unit_tests', |
| 85 'ui_unittests', | 81 'ui_unittests', |
| 86 'unit_tests', | 82 'unit_tests', |
| 87 'webkit_compositor_bindings_unittests', | 83 'webkit_compositor_bindings_unittests', |
| 88 ] | 84 ] |
| 89 | 85 |
| 90 | 86 |
| 91 def FullyQualifiedTestSuites(exe, option_test_suite, build_type): | 87 def FullyQualifiedTestSuites(exe, option_test_suite, build_type): |
| 92 """Return a fully qualified list | 88 """Get a list of absolute paths to test suite targets. |
| 93 | 89 |
| 94 Args: | 90 Args: |
| 95 exe: if True, use the executable-based test runner. | 91 exe: if True, use the executable-based test runner. |
| 96 option_test_suite: the test_suite specified as an option. | 92 option_test_suite: the test_suite specified as an option. |
| 97 build_type: 'Release' or 'Debug'. | 93 build_type: 'Release' or 'Debug'. |
| 98 """ | 94 """ |
| 99 test_suite_dir = os.path.join(cmd_helper.OutDirectory.get(), build_type) | 95 test_suite_dir = os.path.join(cmd_helper.OutDirectory.get(), build_type) |
| 100 if option_test_suite: | 96 if option_test_suite: |
| 101 all_test_suites = [option_test_suite] | 97 all_test_suites = [option_test_suite] |
| 102 else: | 98 else: |
| (...skipping 79 matching lines...) | |
| 182 except: | 178 except: |
| 183 pass | 179 pass |
| 184 del os.environ['DISPLAY'] | 180 del os.environ['DISPLAY'] |
| 185 self._pid = 0 | 181 self._pid = 0 |
| 186 | 182 |
| 187 | 183 |
| 188 class TestSharder(BaseTestSharder): | 184 class TestSharder(BaseTestSharder): |
| 189 """Responsible for sharding the tests on the connected devices.""" | 185 """Responsible for sharding the tests on the connected devices.""" |
| 190 | 186 |
| 191 def __init__(self, attached_devices, test_suite, gtest_filter, | 187 def __init__(self, attached_devices, test_suite, gtest_filter, |
| 192 test_arguments, timeout, rebaseline, performance_test, | 188 test_arguments, timeout, performance_test, |
| 193 cleanup_test_files, tool, log_dump_name, fast_and_loose, | 189 cleanup_test_files, tool, log_dump_name, fast_and_loose, |
| 194 build_type, in_webkit_checkout): | 190 build_type, in_webkit_checkout): |
| 195 BaseTestSharder.__init__(self, attached_devices, build_type) | 191 BaseTestSharder.__init__(self, attached_devices, build_type) |
| 196 self.test_suite = test_suite | 192 self.test_suite = test_suite |
| 197 self.test_suite_basename = os.path.basename(test_suite) | 193 self.test_suite_basename = os.path.basename(test_suite) |
| 198 self.gtest_filter = gtest_filter or '' | 194 self.gtest_filter = gtest_filter or '' |
| 199 self.test_arguments = test_arguments | 195 self.test_arguments = test_arguments |
| 200 self.timeout = timeout | 196 self.timeout = timeout |
| 201 self.rebaseline = rebaseline | |
| 202 self.performance_test = performance_test | 197 self.performance_test = performance_test |
| 203 self.cleanup_test_files = cleanup_test_files | 198 self.cleanup_test_files = cleanup_test_files |
| 204 self.tool = tool | 199 self.tool = tool |
| 205 self.log_dump_name = log_dump_name | 200 self.log_dump_name = log_dump_name |
| 206 self.fast_and_loose = fast_and_loose | 201 self.fast_and_loose = fast_and_loose |
| 207 self.build_type = build_type | |
| 208 self.in_webkit_checkout = in_webkit_checkout | 202 self.in_webkit_checkout = in_webkit_checkout |
| 209 self.tests = [] | 203 self.all_tests = [] |
| 210 if not self.gtest_filter: | 204 if not self.gtest_filter: |
| 211 # No filter has been specified, let's add all tests then. | 205 # No filter has been specified, let's add all tests then. |
| 212 self.tests, self.attached_devices = self._GetTests() | 206 self.all_tests = self._GetAllEnabledTests() |
| 207 self.tests = self.all_tests | |

> bulach 2012/12/14 10:36:11: what is the difference between tests and all_tests
> frankf 2012/12/14 19:12:41: self.tests get reassigned on retries (in base_test

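To make the distinction concrete: a minimal sketch of the retry pattern frankf describes, using a hypothetical stand-in (FakeSharder and RetryFailures are illustrative names, not the real BaseTestSharder API):

```python
# Hypothetical stand-in for the behavior described above: all_tests stays
# fixed for the final report, while tests is reassigned to the failures
# on each retry pass.
class FakeSharder(object):
    def __init__(self, all_tests):
        self.all_tests = all_tests  # full enabled set, used by LogFull
        self.tests = all_tests      # working set, reassigned on retries

    def RetryFailures(self, failed_tests):
        # Rerun only the failures; the report still covers all_tests.
        self.tests = failed_tests

sharder = FakeSharder(['A.Foo', 'A.Bar', 'B.Baz'])
sharder.RetryFailures(['A.Bar'])
assert sharder.tests == ['A.Bar']
assert sharder.all_tests == ['A.Foo', 'A.Bar', 'B.Baz']
```
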
| 213 | 208 |
| 214 def _GetTests(self): | 209 def _GetAllEnabledTests(self): |
| 215 """Returns a tuple of (all_tests, available_devices). | 210 """Returns a list of all enabled tests. |
| 216 | 211 |
| 217 Tries to obtain the list of available tests. | 212 Obtains a list of enabled tests from the test package on the device, |
| 213 then filters it again using the disabled list on the host. | |
| 214 | |
| 218 Raises Exception if all devices failed. | 215 Raises Exception if all devices failed. |
| 219 """ | 216 """ |
| 220 available_devices = list(self.attached_devices) | 217 available_devices = list(self.attached_devices) |
| 221 while available_devices: | 218 while available_devices: |
| 222 try: | 219 try: |
| 223 logging.info('Obtaining tests from %s', available_devices[-1]) | 220 return self._GetTestsFromDevice(available_devices[-1]) |
| 224 all_tests = self._GetTestsFromDevice(available_devices[-1]) | |
| 225 return all_tests, available_devices | |

> bulach 2012/12/14 10:36:11: this was the really important bit... :)
> frankf 2012/12/14 19:12:41: As discussed offline, we need a better approach to

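For context on this exchange: the new revision retries on the remaining devices and raises only after every device has failed, but it no longer hands the pruned device list back to the caller. A minimal sketch of the new control flow as it reads here (get_tests_with_fallback and query_fn are illustrative names):

```python
import logging

def get_tests_with_fallback(attached_devices, query_fn):
    # query_fn stands in for _GetTestsFromDevice.
    available = list(attached_devices)
    while available:
        try:
            return query_fn(available[-1])
        except Exception as e:
            # Drop the failing device and retry on the next one.
            logging.warning('Failed obtaining tests from %s: %s',
                            available[-1], e)
            available.pop()
    raise Exception('No device available to get the list of tests.')
```
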
| 226 except Exception as e: | 221 except Exception as e: |
| 227 logging.info('Failed obtaining tests from %s %s', | 222 logging.warning('Failed obtaining tests from %s %s', |
| 228 available_devices[-1], e) | 223 available_devices[-1], e) |
| 229 available_devices.pop() | 224 available_devices.pop() |
| 225 | |
| 230 raise Exception('No device available to get the list of tests.') | 226 raise Exception('No device available to get the list of tests.') |
| 231 | 227 |
| 232 def _GetTestsFromDevice(self, device): | 228 def _GetTestsFromDevice(self, device): |
| 233 test = SingleTestRunner(device, self.test_suite, self.gtest_filter, | 229 logging.info('Obtaining tests from %s', device) |
| 234 self.test_arguments, self.timeout, self.rebaseline, | 230 test_runner = SingleTestRunner( |
| 235 self.performance_test, self.cleanup_test_files, | 231 device, |
| 236 self.tool, 0, | 232 self.test_suite, |
| 237 not not self.log_dump_name, self.fast_and_loose, | 233 self.gtest_filter, |
| 238 self.build_type, self.in_webkit_checkout) | 234 self.test_arguments, |
| 235 self.timeout, | |
| 236 self.performance_test, | |
| 237 self.cleanup_test_files, | |
| 238 self.tool, | |
| 239 0, | |
| 240 not not self.log_dump_name, | |
| 241 self.fast_and_loose, | |
| 242 self.build_type, | |
| 243 self.in_webkit_checkout) | |
| 239 # The executable/apk needs to be copied before we can call GetAllTests. | 244 # The executable/apk needs to be copied before we can call GetAllTests. |
| 240 test.test_package.StripAndCopyExecutable() | 245 test_runner.test_package.StripAndCopyExecutable() |
| 241 all_tests = test.test_package.GetAllTests() | 246 all_tests = test_runner.test_package.GetAllTests() |
| 242 if not self.rebaseline: | 247 disabled_list = test_runner.GetDisabledTests() |
| 243 disabled_list = test.GetDisabledTests() | 248 # Only includes tests that do not have any match in the disabled list. |
| 244 # Only includes tests that do not have any match in the disabled list. | 249 all_tests = filter(lambda t: |
| 245 all_tests = filter(lambda t: | 250 not any([fnmatch.fnmatch(t, disabled_pattern) |
| 246 not any([fnmatch.fnmatch(t, disabled_pattern) | 251 for disabled_pattern in disabled_list]), |
| 247 for disabled_pattern in disabled_list]), | 252 all_tests) |
| 248 all_tests) | |
| 249 return all_tests | 253 return all_tests |
| 250 | 254 |
| 251 def CreateShardedTestRunner(self, device, index): | 255 def CreateShardedTestRunner(self, device, index): |
| 252 """Creates a suite-specific test runner. | 256 """Creates a suite-specific test runner. |
| 253 | 257 |
| 254 Args: | 258 Args: |
| 255 device: Device serial where this shard will run. | 259 device: Device serial where this shard will run. |
| 256 index: Index of this device in the pool. | 260 index: Index of this device in the pool. |
| 257 | 261 |
| 258 Returns: | 262 Returns: |
| 259 A SingleTestRunner object. | 263 A SingleTestRunner object. |
| 260 """ | 264 """ |
| 261 device_num = len(self.attached_devices) | 265 device_num = len(self.attached_devices) |
| 262 shard_size = (len(self.tests) + device_num - 1) / device_num | 266 shard_size = (len(self.tests) + device_num - 1) / device_num |
| 263 shard_test_list = self.tests[index * shard_size : (index + 1) * shard_size] | 267 shard_test_list = self.tests[index * shard_size : (index + 1) * shard_size] |
| 264 test_filter = ':'.join(shard_test_list) + self.gtest_filter | 268 test_filter = ':'.join(shard_test_list) + self.gtest_filter |
| 265 return SingleTestRunner(device, self.test_suite, | 269 return SingleTestRunner( |
| 266 test_filter, self.test_arguments, self.timeout, | 270 device, |
| 267 self.rebaseline, self.performance_test, | 271 self.test_suite, |
| 268 self.cleanup_test_files, self.tool, index, | 272 test_filter, |
| 269 not not self.log_dump_name, self.fast_and_loose, | 273 self.test_arguments, |
| 270 self.build_type, self.in_webkit_checkout) | 274 self.timeout, |
| 275 self.performance_test, | |
| 276 self.cleanup_test_files, self.tool, index, | |
| 277 not not self.log_dump_name, | |
| 278 self.fast_and_loose, | |
| 279 self.build_type, | |
| 280 self.in_webkit_checkout) | |
| 271 | 281 |
| 272 def OnTestsCompleted(self, test_runners, test_results): | 282 def OnTestsCompleted(self, test_runners, test_results): |
| 273 """Notifies that we completed the tests.""" | 283 """Notifies that we completed the tests.""" |
| 274 test_results.LogFull('Unit test', os.path.basename(self.test_suite), | 284 test_results.LogFull('Unit test', os.path.basename(self.test_suite), |
| 275 self.build_type, self.tests) | 285 self.build_type, self.all_tests) |
| 276 test_results.PrintAnnotation() | 286 test_results.PrintAnnotation() |
| 277 if test_results.failed and self.rebaseline: | |
| 278 test_runners[0].UpdateFilter(test_results.failed) | |
| 279 if self.log_dump_name: | 287 if self.log_dump_name: |
| 280 # Zip all debug info outputs into a file named by log_dump_name. | 288 # Zip all debug info outputs into a file named by log_dump_name. |
| 281 debug_info.GTestDebugInfo.ZipAndCleanResults( | 289 debug_info.GTestDebugInfo.ZipAndCleanResults( |
| 282 os.path.join(cmd_helper.OutDirectory.get(), self.build_type, | 290 os.path.join( |
| 291 cmd_helper.OutDirectory.get(), self.build_type, | |
| 283 'debug_info_dumps'), | 292 'debug_info_dumps'), |
| 284 self.log_dump_name) | 293 self.log_dump_name) |
| 285 | 294 |
| 286 | 295 |
| 287 def _RunATestSuite(options): | 296 def _RunATestSuite(options): |
| 288 """Run a single test suite. | 297 """Run a single test suite. |
| 289 | 298 |
| 290 Helper for Dispatch() to allow stop/restart of the emulator across | 299 Helper for Dispatch() to allow stop/restart of the emulator across |
| 291 test bundles. If using the emulator, we start it on entry and stop | 300 test bundles. If using the emulator, we start it on entry and stop |
| 292 it on exit. | 301 it on exit. |
| 293 | 302 |
| 294 Args: | 303 Args: |
| 295 options: options for running the tests. | 304 options: options for running the tests. |
| 296 | 305 |
| 297 Returns: | 306 Returns: |
| 298 0 if successful, number of failing tests otherwise. | 307 0 if successful, number of failing tests otherwise. |
| 299 """ | 308 """ |
| 300 step_name = os.path.basename(options.test_suite).replace('-debug.apk', '') | 309 step_name = os.path.basename(options.test_suite).replace('-debug.apk', '') |
| 301 buildbot_report.PrintNamedStep(step_name) | 310 buildbot_report.PrintNamedStep(step_name) |
| 302 attached_devices = [] | 311 attached_devices = [] |
| 303 buildbot_emulators = [] | 312 buildbot_emulators = [] |
| 304 | 313 |
| 305 if options.use_emulator: | 314 if options.use_emulator: |
| 306 for n in range(options.emulator_count): | 315 for n in range(options.emulator_count): |
| 307 t = TimeProfile('Emulator launch %d' % n) | 316 t = TimeProfile('Emulator launch %d' % n) |
| 308 avd_name = None | 317 avd_name = None |
| 309 if n > 0: | 318 if n > 0: |
| 310 # Creates a temporary AVD for the extra emulators. | 319 # Creates a temporary AVD for the extra emulators. |
| 311 avd_name = 'run_tests_avd_%d' % n | 320 avd_name = 'run_tests_avd_%d' % n |
| 312 buildbot_emulator = emulator.Emulator(avd_name, options.fast_and_loose) | 321 buildbot_emulator = emulator.Emulator(avd_name, options.fast_and_loose) |
| 313 buildbot_emulator.Launch(kill_all_emulators=n == 0) | 322 buildbot_emulator.Launch(kill_all_emulators=n == 0) |
| 314 t.Stop() | 323 t.Stop() |
| 315 buildbot_emulators.append(buildbot_emulator) | 324 buildbot_emulators.append(buildbot_emulator) |
| 316 attached_devices.append(buildbot_emulator.device) | 325 attached_devices.append(buildbot_emulator.device) |
| 317 # Wait for all emulators to finish booting. | 326 # Wait for all emulators to finish booting. |
| 318 map(lambda buildbot_emulator: buildbot_emulator.ConfirmLaunch(True), | 327 map(lambda buildbot_emulator: buildbot_emulator.ConfirmLaunch(True), |
| 319 buildbot_emulators) | 328 buildbot_emulators) |
| 320 elif options.test_device: | 329 elif options.test_device: |
| 321 attached_devices = [options.test_device] | 330 attached_devices = [options.test_device] |
| 322 else: | 331 else: |
| 323 attached_devices = android_commands.GetAttachedDevices() | 332 attached_devices = android_commands.GetAttachedDevices() |
| 324 | 333 |
| 325 if not attached_devices: | 334 if not attached_devices: |
| 326 logging.critical('A device must be attached and online.') | 335 logging.critical('A device must be attached and online.') |
| 327 buildbot_report.PrintError() | 336 buildbot_report.PrintError() |
| 328 return 1 | 337 return 1 |
| 329 | 338 |
| 330 # Reset the test port allocation. It's important to do it before starting | 339 # Reset the test port allocation. It's important to do it before starting |
| 331 # to dispatch any tests. | 340 # to dispatch any tests. |
| 332 if not ports.ResetTestServerPortAllocation(): | 341 if not ports.ResetTestServerPortAllocation(): |
| 333 raise Exception('Failed to reset test server port.') | 342 raise Exception('Failed to reset test server port.') |
| 334 | 343 |
| 335 if options.performance_test or options.gtest_filter: | 344 if options.performance_test or options.gtest_filter: |
| 336 # These configuration can't be split in multiple devices. | 345 logging.warning('Sharding is not possible with these configurations.') |
| 337 attached_devices = [attached_devices[0]] | 346 attached_devices = [attached_devices[0]] |
| 338 sharder = TestSharder(attached_devices, options.test_suite, | 347 |
| 339 options.gtest_filter, options.test_arguments, | 348 sharder = TestSharder( |
| 340 options.timeout, options.rebaseline, | 349 attached_devices, |
| 341 options.performance_test, | 350 options.test_suite, |
| 342 options.cleanup_test_files, options.tool, | 351 options.gtest_filter, |
| 343 options.log_dump, options.fast_and_loose, | 352 options.test_arguments, |
| 344 options.build_type, options.webkit) | 353 options.timeout, |
| 354 options.performance_test, | |
| 355 options.cleanup_test_files, | |
| 356 options.tool, | |
| 357 options.log_dump, | |
| 358 options.fast_and_loose, | |
| 359 options.build_type, | |
| 360 options.webkit) | |
| 345 test_results = sharder.RunShardedTests() | 361 test_results = sharder.RunShardedTests() |
| 346 | 362 |
| 347 for buildbot_emulator in buildbot_emulators: | 363 for buildbot_emulator in buildbot_emulators: |
| 348 buildbot_emulator.Shutdown() | 364 buildbot_emulator.Shutdown() |
| 349 | 365 |
| 350 return len(test_results.failed) | 366 return len(test_results.failed) |
| 351 | 367 |
| 352 | 368 |
| 353 def Dispatch(options): | 369 def Dispatch(options): |
| 354 """Dispatches the tests, sharding if possible. | 370 """Dispatches the tests, sharding if possible. |
| (...skipping 40 matching lines...) | |
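Stepping back to _GetTestsFromDevice above: the suppression step keeps only tests with no match in the disabled list, using fnmatch patterns. A standalone sketch of that filter (the test names and patterns below are made up):

```python
import fnmatch

all_tests = ['DataPackTest.Load', 'DataPackTest.Write',
             'ReadOnlyFileUtilTest.ContentsEqual']
disabled_list = ['DataPackTest.Load', 'ReadOnlyFileUtilTest.*']

# Keep only tests that match none of the disabled patterns, as in the diff.
enabled = [t for t in all_tests
           if not any(fnmatch.fnmatch(t, pattern)
                      for pattern in disabled_list)]
assert enabled == ['DataPackTest.Write']
```
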
| 395 option_parser = optparse.OptionParser() | 411 option_parser = optparse.OptionParser() |
| 396 test_options_parser.AddTestRunnerOptions(option_parser, default_timeout=0) | 412 test_options_parser.AddTestRunnerOptions(option_parser, default_timeout=0) |
| 397 option_parser.add_option('-s', '--suite', dest='test_suite', | 413 option_parser.add_option('-s', '--suite', dest='test_suite', |
| 398 help='Executable name of the test suite to run ' | 414 help='Executable name of the test suite to run ' |
| 399 '(use -s help to list them)') | 415 '(use -s help to list them)') |
| 400 option_parser.add_option('--out-directory', dest='out_directory', | 416 option_parser.add_option('--out-directory', dest='out_directory', |
| 401 help='Path to the out/ directory, irrespective of ' | 417 help='Path to the out/ directory, irrespective of ' |
| 402 'the build type. Only for non-Chromium uses.') | 418 'the build type. Only for non-Chromium uses.') |
| 403 option_parser.add_option('-d', '--device', dest='test_device', | 419 option_parser.add_option('-d', '--device', dest='test_device', |
| 404 help='Target device the test suite to run ') | 420 help='Target device the test suite to run ') |
| 405 option_parser.add_option('-r', dest='rebaseline', | |
| 406 help='Rebaseline and update *testsuite_disabled', | |
| 407 action='store_true') | |
| 408 option_parser.add_option('-f', '--gtest_filter', dest='gtest_filter', | 421 option_parser.add_option('-f', '--gtest_filter', dest='gtest_filter', |
| 409 help='gtest filter') | 422 help='gtest filter') |
| 410 option_parser.add_option('-a', '--test_arguments', dest='test_arguments', | 423 option_parser.add_option('-a', '--test_arguments', dest='test_arguments', |
| 411 help='Additional arguments to pass to the test') | 424 help='Additional arguments to pass to the test') |
| 412 option_parser.add_option('-p', dest='performance_test', | 425 option_parser.add_option('-p', dest='performance_test', |
| 413 help='Indicator of performance test', | 426 help='Indicator of performance test', |
| 414 action='store_true') | 427 action='store_true') |
| 415 option_parser.add_option('-L', dest='log_dump', | 428 option_parser.add_option('-L', dest='log_dump', |
| 416 help='file name of log dump, which will be put in ' | 429 help='file name of log dump, which will be put in ' |
| 417 'subfolder debug_info_dumps under the same ' | 430 'subfolder debug_info_dumps under the same ' |
| (...skipping 22 matching lines...) | |
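Taken together, the options around this hunk combine along these lines. An illustrative invocation, assuming the script lives at build/android/run_tests.py (note that per the change above, passing -f/--gtest_filter forces a single device, disabling sharding):

```
$ python build/android/run_tests.py -s base_unittests -f 'DataPackTest.*' --exit_code
```
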
| 440 default=2, | 453 default=2, |
| 441 help='Repeat count on test timeout') | 454 help='Repeat count on test timeout') |
| 442 option_parser.add_option('--exit_code', action='store_true', | 455 option_parser.add_option('--exit_code', action='store_true', |
| 443 help='If set, the exit code will be total number ' | 456 help='If set, the exit code will be total number ' |
| 444 'of failures.') | 457 'of failures.') |
| 445 option_parser.add_option('--exe', action='store_true', | 458 option_parser.add_option('--exe', action='store_true', |
| 446 help='If set, use the exe test runner instead of ' | 459 help='If set, use the exe test runner instead of ' |
| 447 'the APK.') | 460 'the APK.') |
| 448 | 461 |
| 449 options, args = option_parser.parse_args(argv) | 462 options, args = option_parser.parse_args(argv) |
| 463 | |
| 450 if len(args) > 1: | 464 if len(args) > 1: |
| 451 print 'Unknown argument:', args[1:] | 465 print 'Unknown argument:', args[1:] |
| 452 option_parser.print_usage() | 466 option_parser.print_usage() |
| 453 sys.exit(1) | 467 sys.exit(1) |
| 468 | |
| 454 run_tests_helper.SetLogLevel(options.verbose_count) | 469 run_tests_helper.SetLogLevel(options.verbose_count) |
| 470 | |
| 455 if options.out_directory: | 471 if options.out_directory: |
| 456 cmd_helper.OutDirectory.set(options.out_directory) | 472 cmd_helper.OutDirectory.set(options.out_directory) |
| 457 emulator.DeleteAllTempAVDs() | 473 |
| 474 if options.use_emulator: | |
| 475 emulator.DeleteAllTempAVDs() | |
| 476 | |
| 458 failed_tests_count = Dispatch(options) | 477 failed_tests_count = Dispatch(options) |
| 459 | 478 |
| 460 # Failures of individual test suites are communicated by printing a | 479 # Failures of individual test suites are communicated by printing a |
| 461 # STEP_FAILURE message. | 480 # STEP_FAILURE message. |
| 462 # Returning a success exit status also prevents the buildbot from incorrectly | 481 # Returning a success exit status also prevents the buildbot from incorrectly |
| 463 # marking the last suite as failed if there were failures in other suites in | 482 # marking the last suite as failed if there were failures in other suites in |
| 464 # the batch (this happens because the exit status is a sum of all failures | 483 # the batch (this happens because the exit status is a sum of all failures |
| 465 # from all suites, but the buildbot associates the exit status only with the | 484 # from all suites, but the buildbot associates the exit status only with the |
| 466 # most recent step). | 485 # most recent step). |
| 467 if options.exit_code: | 486 if options.exit_code: |
| 468 return failed_tests_count | 487 return failed_tests_count |
| 469 return 0 | 488 return 0 |
| 470 | 489 |
| 471 | 490 |
| 472 if __name__ == '__main__': | 491 if __name__ == '__main__': |
| 473 sys.exit(main(sys.argv)) | 492 sys.exit(main(sys.argv)) |
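One closing note on the arithmetic in CreateShardedTestRunner: shard_size is a ceil-division, so every device gets at most shard_size tests and the last shard absorbs the remainder. A quick worked check (the script relies on Python 2's integer '/'; '//' below makes that explicit):

```python
tests = ['Suite.Test%d' % i for i in range(10)]
device_num = 3
# Ceil-division: (10 + 3 - 1) // 3 == 4 tests per shard.
shard_size = (len(tests) + device_num - 1) // device_num
shards = [tests[i * shard_size:(i + 1) * shard_size]
          for i in range(device_num)]
assert [len(s) for s in shards] == [4, 4, 2]
```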