Chromium Code Reviews| OLD | NEW |
|---|---|
| (Empty) | |
| 1 #!/usr/bin/env python | |
| 2 # | |
| 3 # Copyright (c) 2013 The Chromium Authors. All rights reserved. | |
| 4 # Use of this source code is governed by a BSD-style license that can be | |
| 5 # found in the LICENSE file. | |
| 6 | |
| 7 """Runs all types of tests from one unified interface. | |
| 8 | |
| 9 Types of tests supported: | |
| 10 1. GTest native unit tests (gtests) | |
| 11 Example: ./run_all_tests.py gtests -s android_webview_unittests | |
| 12 2. ContentBrowser unit tests (content_browsertests) | |
| 13 Example: ./run_all_tests.py content_browsertests | |
| 14 3. Instrumentation tests (instrumentation): Both Python host-driven and | |
| 15 Java instrumentation tests are run by default. Use --python_only or | |
| 16 --java_only to select one or the other. | |
| 17 Example: ./run_all_tests.py instrumentation -I | |
| 18 --test-apk=ChromiumTestShellTest | |
| 19 4. UIAutomator tests (uiautomator): Both Python host-driven and Java | |
| 20 UIAutomator tests are run by default. Use --python_only or --java_only to | |
| 21 select one or the other. | |
| 22 Example: ./run_all_tests.py uiautomator | |
| 23 --test-jar=chromium_testshell_uiautomator_tests | |
| 24 --package-name=org.chromium.chrome.testshell | |
| 25 | |
| 26 TODO(gkanwar): | |
| 27 * Add options to run Monkey tests. | |
| 28 """ | |
| 29 | |
| 30 import optparse | |
| 31 import os | |
| 32 import sys | |
| 33 | |
| 34 from pylib import cmd_helper | |
| 35 from pylib import constants | |
| 36 from pylib import ports | |
| 37 from pylib.browsertests import dispatch as browsertests_dispatch | |
| 38 from pylib.gtest import dispatch as gtest_dispatch | |
| 39 from pylib.host_driven import run_python_tests as python_dispatch | |
| 40 from pylib.instrumentation import dispatch as instrumentation_dispatch | |
| 41 from pylib.uiautomator import dispatch as uiautomator_dispatch | |
| 42 from pylib.utils import emulator | |
| 43 from pylib.utils import run_tests_helper | |
| 44 | |
# Root of the Android SDK build output tree ('out/' under the source root);
# built APKs and jars are resolved relative to this.
_SDK_OUT_DIR = os.path.join(constants.DIR_SOURCE_ROOT, 'out')
# Keywords accepted as the first positional argument; see the module docstring
# for an example invocation of each.
VALID_TEST_TYPES = ['gtests', 'content_browsertests', 'instrumentation',
                    'uiautomator']
| 48 | |
| 49 | |
def ProcessTestTypeArg(options, args, errorf):
  """Validates the first positional argument and records it as the test type.

  Args:
    options: optparse options object; test_type is set on it.
    args: the full argv-style list; args[1] must name a valid test type.
    errorf: error callback (e.g. OptionParser.error); expected not to return.
  """
  valid = ', '.join(VALID_TEST_TYPES)
  if len(args) < 2:
    errorf('You must specify a test type.\nOptions are: ' + valid)
  if args[1] not in VALID_TEST_TYPES:
    errorf('Invalid test type.\nThe test type must be one of: ' + valid)
  options.test_type = args[1]
| 59 | |
| 60 | |
def AddBuildTypeOption(option_container):
  """Registers the --debug/--release build-type options.

  The default build type comes from the BUILDTYPE environment variable,
  falling back to 'Debug'.
  """
  option_container.add_option('--debug', action='store_const', const='Debug',
                              dest='build_type',
                              default=os.environ.get('BUILDTYPE', 'Debug'),
                              help=('If set, run test suites under out/Debug. '
                                    'Default is env var BUILDTYPE or Debug.'))
  option_container.add_option('--release', action='store_const',
                              const='Release', dest='build_type',
                              help=('If set, run test suites under out/Release.'
                                    ' Default is env var BUILDTYPE or Debug.'))
| 74 | |
| 75 | |
def AddDeviceOptions(option_container):
  """Registers device/emulator selection options on the OptionContainer."""
  # TODO(gkanwar): Figure out what we're doing with the emulator setup
  # and determine whether these options should be deprecated/removed.
  add = option_container.add_option
  add('-d', '--device', dest='test_device',
      help='Target device for the test suite to run on.')
  add('-e', '--emulator', dest='use_emulator', action='store_true',
      help='Run tests in a new instance of emulator.')
  add('-n', '--emulator-count', type='int', default=1,
      help='Number of emulators to launch for running the tests.')
  add('--abi', default='armeabi-v7a',
      help='Platform of emulators to launch.')
| 93 | |
| 94 | |
def ProcessDeviceOptions(options):
  """Processes emulator and device options.

  When --emulator was requested, deletes any leftover temporary AVDs so the
  emulator launch starts from a clean slate.
  """
  if options.use_emulator:
    emulator.DeleteAllTempAVDs()
| 99 | |
| 100 | |
def AddCommonOptions(option_parser):
  """Registers the options shared by every test type on option_parser."""
  option_group = optparse.OptionGroup(option_parser, 'Common Options',
                                      'Options that apply to all test types.')
  AddBuildTypeOption(option_group)
  add = option_group.add_option

  # --gtest_filter is DEPRECATED. Added for backwards compatibility
  # with the syntax of the old run_tests.py script.
  add('-f', '--test_filter', '--gtest_filter', dest='test_filter',
      help='Test filter (if not fully qualified, will run all matches).')
  add('--out-directory', dest='out_directory',
      help=('Path to the out/ directory, irrespective of '
            'the build type. Only for non-Chromium uses.'))
  add('-c', dest='cleanup_test_files', action='store_true',
      help='Cleanup test files on the device after run')
  add('--num_retries', dest='num_retries', type='int', default=2,
      help='Number of retries for a test before giving up.')
  add('-v', '--verbose', dest='verbose_count', default=0, action='count',
      help='Verbose level (multiple times for more)')
  profilers = ['devicestatsmonitor', 'chrometrace', 'dumpheap', 'smaps',
               'traceview']
  add('--profiler', dest='profilers', action='append', choices=profilers,
      help=('Profiling tool to run during test. Pass '
            'multiple times to run multiple profilers. '
            'Available profilers: %s' % profilers))
  add('--tool', dest='tool',
      help='Run the test under a tool (use --tool help to list them)')
  add('--flakiness-dashboard-server', dest='flakiness_dashboard_server',
      help=('Address of the server that is hosting the '
            'Chrome for Android flakiness dashboard.'))
  add('--skip-deps-push', dest='push_deps', action='store_false', default=True,
      help=('Do not push dependencies to the device. '
            'Use this at own risk for speeding up test '
            'execution on local machine.'))
  # TODO(gkanwar): This option is deprecated. Remove it in the future.
  add('--exit-code', action='store_true',
      help=('(DEPRECATED) If set, the exit code will be '
            'total number of failures.'))
  # TODO(gkanwar): This option is deprecated. It is currently used to run tests
  # with the FlakyTest annotation to prevent the bots going red downstream. See
  # crbug.com/147176. We should remove this option once FlakyTests are removed
  # from the general test script.
  add('--buildbot-step-failure', action='store_true',
      help=('(DEPRECATED) If present, will set the '
            'buildbot status as STEP_FAILURE, otherwise as '
            'STEP_WARNINGS when test(s) fail.'))
  option_parser.add_option_group(option_group)
| 165 | |
| 166 | |
def ProcessCommonOptions(options):
  """Processes and handles all common options.

  Records a non-default out/ directory with cmd_helper and applies the
  requested logging verbosity.
  """
  if options.out_directory:
    cmd_helper.OutDirectory.set(options.out_directory)
  run_tests_helper.SetLogLevel(options.verbose_count)
| 172 | |
| 173 | |
def AddContentBrowserOptions(option_parser):
  """Registers an (empty) option group for Content Browser tests.

  There are no Content Browser specific options today; the group exists only
  so the --help output can show an example invocation for this test type.
  """
  option_group = optparse.OptionGroup(
      option_parser, 'Content Browser Test Options', 'There are no Content '
      'Browser specific options currently. Example usage: ./run_all_tests.py '
      'content_browsertests')
  option_parser.add_option_group(option_group)
| 187 | |
| 188 | |
def AddGTestOptions(option_parser, default_timeout=60):
  """Registers gtest-specific options on option_parser.

  Args:
    option_parser: optparse.OptionParser to receive the option group.
    default_timeout: default per-test timeout in seconds for -t.
  """
  option_group = optparse.OptionGroup(
      option_parser, 'GTest Options', 'Use these options to choose which '
      'test suites to run and how. Example usage: ./run_all_tests.py gtests -s '
      'android_webview_unittests')
  add = option_group.add_option
  add('-s', '--suite', dest='test_suite',
      help='Executable name of the test suite to run (use -s help to list them).')
  add('-a', '--test_arguments', dest='test_arguments',
      help='Additional arguments to pass to the test.')
  # TODO(gkanwar): Most likely deprecate/remove this option once we've pinned
  # down what we're doing with the emulator setup.
  add('-x', '--xvfb', dest='use_xvfb', action='store_true',
      help='Use Xvfb around tests (ignored if not Linux).')
  # TODO(gkanwar): Possible deprecate this flag. Waiting on word from Peter
  # Beverloo.
  add('--webkit', action='store_true',
      help='Run the tests from a WebKit checkout.')
  add('--exe', action='store_true',
      help='If set, use the exe test runner instead of the APK.')

  # TODO(gkanwar): Move these to Common Options once we have the plumbing
  # in our other test types to handle these commands
  AddDeviceOptions(option_group)
  add('-t', dest='timeout', type='int', default=default_timeout,
      help='Timeout to wait for each test')

  option_parser.add_option_group(option_group)
| 224 | |
| 225 | |
def AddJavaTestOptions(option_parser):
  """Registers options shared by Instrumentation and UIAutomator tests.

  Args:
    option_parser: optparse.OptionParser to receive the option group.
  """
  option_group = optparse.OptionGroup(option_parser,
                                      'Java Test Options',
                                      'Options for both Instrumentation and '
                                      'UIAutomator tests. These types also '
                                      'have their own specific options (see '
                                      'below).')
  option_group.add_option(
      '-A', '--annotation', dest='annotation_str',
      help=('Comma-separated list of annotations. Run only tests with any of '
            'the given annotations. An annotation can be either a key or a '
            'key-values pair. A test that has no annotation is considered '
            '"SmallTest".'))
  option_group.add_option(
      '-E', '--exclude-annotation', dest='exclude_annotation_str',
      help=('Comma-separated list of annotations. Exclude tests with these '
            'annotations.'))
  option_group.add_option('-j', '--java_only', action='store_true',
                          default=False, help='Run only the Java tests.')
  option_group.add_option('-p', '--python_only', action='store_true',
                          default=False,
                          help='Run only the host-driven tests.')
  option_group.add_option('--screenshot', dest='screenshot_failures',
                          action='store_true',
                          help='Capture screenshots of test failures')
  option_group.add_option('--save-perf-json', action='store_true',
                          help='Saves the JSON file for each UI Perf test.')
  # TODO(gkanwar): Remove this option. It is not used anywhere.
  # (type normalized to the string form used everywhere else in this file;
  # optparse treats type=int and type='int' identically.)
  option_group.add_option('--shard_retries', type='int', default=1,
                          help=('Number of times to retry each failure when '
                                'sharding.'))
  # BUG FIX: --official-build is a boolean flag. Without action='store_true'
  # optparse treated it as a value-taking option and consumed the following
  # command-line argument as its value.
  option_group.add_option('--official-build', action='store_true',
                          help='Run official build tests.')
  option_group.add_option('--python_test_root',
                          help='Root of the host-driven tests.')
  option_group.add_option('--keep_test_server_ports',
                          action='store_true',
                          help=('Indicates the test server ports must be '
                                'kept. When this is run via a sharder '
                                'the test server ports should be kept and '
                                'should not be reset.'))
  # TODO(gkanwar): This option is deprecated. Remove it in the future.
  option_group.add_option('--disable_assertions', action='store_true',
                          help=('(DEPRECATED) Run with java assertions '
                                'disabled.'))
  # Typo fix in the help text below: added the missing space that previously
  # rendered as "device datadirectory".
  option_group.add_option('--test_data', action='append', default=[],
                          help=('Each instance defines a directory of test '
                                'data that should be copied to the target(s) '
                                'before running the tests. The argument '
                                'should be of the form <target>:<source>, '
                                '<target> is relative to the device data '
                                'directory, and <source> is relative to the '
                                'chromium build directory.'))

  option_parser.add_option_group(option_group)
| 281 | |
| 282 | |
def ProcessJavaTestOptions(options, errorf):
  """Derives run/annotation defaults for Java and host-driven test options.

  Args:
    options: optparse options object (mutated in place).
    errorf: error callback (e.g. OptionParser.error); expected not to return.

  Raises:
    Exception: if the test server port allocation cannot be reset.
  """
  if options.java_only and options.python_only:
    errorf('Options java_only (-j) and python_only (-p) '
           'are mutually exclusive.')

  # Both kinds run by default; each flag turns the other kind off, and the
  # host-driven tests additionally require a test root to be given.
  options.run_java_tests = not options.python_only
  options.run_python_tests = (not options.java_only and
                              bool(options.python_test_root))

  if options.annotation_str:
    options.annotations = options.annotation_str.split(',')
  else:
    # An explicit filter disables the default annotation set.
    options.annotations = (
        [] if options.test_filter
        else ['Smoke', 'SmallTest', 'MediumTest', 'LargeTest'])

  exclude_str = options.exclude_annotation_str
  options.exclude_annotations = exclude_str.split(',') if exclude_str else []

  if not options.keep_test_server_ports:
    if not ports.ResetTestServerPortAllocation():
      raise Exception('Failed to reset test server port.')
| 314 | |
| 315 | |
def AddInstrumentationOptions(option_parser):
  """Registers Instrumentation-specific options on option_parser."""
  option_group = optparse.OptionGroup(
      option_parser, 'Instrumentation Test Options',
      'Options specific to Instrumentation tests (the Java options apply to '
      'both Instrumentation and UIAutomator tests). Example usage: '
      './run_all_tests.py instrumentation -I --test-apk=ChromiumTestShellTest')
  add = option_group.add_option
  add('-w', '--wait_debugger', dest='wait_for_debugger', action='store_true',
      help='Wait for debugger.')
  add('-I', dest='install_apk', action='store_true',
      help='Install test APK.')
  add('--test-apk', dest='test_apk',
      help=('The name of the apk containing the tests '
            '(without the .apk extension; e.g. "ContentShellTest"). '
            'Alternatively, this can be a full path to the apk.'))
  option_parser.add_option_group(option_group)
| 335 | |
| 336 | |
def ProcessInstrumentationOptions(options, errorf):
  """Validates Instrumentation options and resolves apk/jar paths.

  Sets options.test_apk_path and options.test_apk_jar_path, either from a
  fully-qualified --test-apk path or from the SDK build output tree.

  Args:
    options: optparse options object (mutated in place).
    errorf: error callback (e.g. OptionParser.error); expected not to return.
  """
  ProcessJavaTestOptions(options, errorf)

  if not options.test_apk:
    errorf('--test-apk must be specified.')

  if os.path.exists(options.test_apk):
    # Fully-qualified APK path; the companion JAR is assumed to sit beside it.
    options.test_apk_path = options.test_apk
    apk_root, _ = os.path.splitext(options.test_apk_path)
    options.test_apk_jar_path = apk_root + '.jar'
  else:
    # Bare suite name: resolve both files under the SDK build output tree.
    build_dir = os.path.join(_SDK_OUT_DIR, options.build_type)
    options.test_apk_path = os.path.join(build_dir,
                                         constants.SDK_BUILD_APKS_DIR,
                                         '%s.apk' % options.test_apk)
    options.test_apk_jar_path = os.path.join(
        build_dir, constants.SDK_BUILD_TEST_JAVALIB_DIR,
        '%s.jar' % options.test_apk)
| 358 | |
| 359 | |
def AddUIAutomatorOptions(option_parser):
  """Registers UIAutomator-specific options on option_parser."""
  option_group = optparse.OptionGroup(
      option_parser, 'UIAutomator Test Options',
      'Options specific to UIAutomator tests (the Java options apply to both '
      'Instrumentation and UIAutomator tests). Example usage: '
      './run_all_tests.py uiautomator '
      '--test-jar=chromium_testshell_uiautomator_tests '
      '--package-name=org.chromium.chrome.testshell')
  add = option_group.add_option
  add('--package-name',
      help='The package name used by the apk containing the application.')
  add('--test-jar', dest='test_jar',
      help=('The name of the dexed jar containing the tests (without the '
            '.dex.jar extension). Alternatively, this can be a full path '
            'to the jar.'))
  option_parser.add_option_group(option_group)
| 378 | |
| 379 | |
def ProcessUIAutomatorOptions(options, errorf):
  """Processes UIAutomator options/arguments.

  Validates that --package-name and --test-jar were supplied, then resolves
  options.uiautomator_jar (the dexed test jar) and
  options.uiautomator_info_jar (its '<name>_java.jar' companion).

  Args:
    options: optparse options object (mutated in place).
    errorf: error callback (e.g. OptionParser.error); expected not to return.
  """

  ProcessJavaTestOptions(options, errorf)

  if not options.package_name:
    errorf('--package-name must be specified.')

  if not options.test_jar:
    errorf('--test-jar must be specified.')

  if os.path.exists(options.test_jar):
    # The dexed JAR is fully qualified, assume the info JAR lives along side.
    options.uiautomator_jar = options.test_jar
  else:
    options.uiautomator_jar = os.path.join(
        _SDK_OUT_DIR, options.build_type, constants.SDK_BUILD_JAVALIB_DIR,
        '%s.dex.jar' % options.test_jar)
  # NOTE(review): if uiautomator_jar does not contain '.dex.jar' (possible
  # when a fully-qualified --test-jar path is given), find() returns -1 and
  # the slice below drops only the last character instead of the suffix —
  # confirm callers always pass a *.dex.jar path.
  options.uiautomator_info_jar = (
      options.uiautomator_jar[:options.uiautomator_jar.find('.dex.jar')] +
      '_java.jar')
| 401 | |
| 402 | |
def RunTests(options, option_parser):
  """Dispatches to the runner(s) matching options.test_type.

  Args:
    options: parsed optparse options.
    option_parser: the parser itself, used for its error() callback.

  Returns:
    The total number of failed tests across all dispatched runners.

  Raises:
    Exception: if options.test_type holds an unrecognized value.
  """
  test_type = options.test_type

  if test_type == 'gtests':
    # TODO(gkanwar): Once we move Device Options to Common, this should get
    # moved out into main.
    ProcessDeviceOptions(options)
    return gtest_dispatch.Dispatch(options)

  if test_type == 'content_browsertests':
    return browsertests_dispatch.Dispatch(options)

  if test_type == 'instrumentation':
    ProcessInstrumentationOptions(options, option_parser.error)
    failures = 0
    if options.run_java_tests:
      failures += instrumentation_dispatch.Dispatch(options)
    if options.run_python_tests:
      failures += python_dispatch.Dispatch(options)
    return failures

  if test_type == 'uiautomator':
    ProcessUIAutomatorOptions(options, option_parser.error)
    failures = 0
    if options.run_java_tests:
      failures += uiautomator_dispatch.Dispatch(options)
    if options.run_python_tests:
      failures += python_dispatch.Dispatch(options)
    return failures

  raise Exception('Unknown test type state')
| 430 | |
| 431 | |
def main(argv):
  """Entry point: builds the option parser, runs the tests, returns status.

  Args:
    argv: full argv list including the program name.

  Returns:
    0 normally; the failure count instead when --exit-code is passed.
  """
  option_parser = optparse.OptionParser(
      usage='Usage: %prog test_type [options]',
      description='Valid test types are: ' + ', '.join(VALID_TEST_TYPES))
  # TODO(gkanwar): Use a 'help' command to break up the current help text
  # wall-of-text. For example, running ./run_all_tests.py help uiautomator
  # would give all options which apply to UIAutomator tests. General help
  # would only display the list of possible commands (the test types and
  # the help command).
  for add_options in (AddCommonOptions, AddContentBrowserOptions,
                      AddGTestOptions, AddJavaTestOptions,
                      AddInstrumentationOptions, AddUIAutomatorOptions):
    add_options(option_parser)
  options, args = option_parser.parse_args(argv)

  ProcessTestTypeArg(options, args, option_parser.error)
  ProcessCommonOptions(options)

  failed_tests_count = RunTests(options, option_parser)

  # Failures of individual test suites are communicated by printing a
  # STEP_FAILURE message.
  # Returning a success exit status also prevents the buildbot from incorrectly
  # marking the last suite as failed if there were failures in other suites in
  # the batch (this happens because the exit status is a sum of all failures
  # from all suites, but the buildbot associates the exit status only with the
  # most recent step).
  if options.exit_code:
    return failed_tests_count
  return 0
| 464 | |
| 465 | |
if __name__ == '__main__':
  # Propagate main()'s return value as the process exit status.
  sys.exit(main(sys.argv))
| OLD | NEW |