#!/usr/bin/env python
#
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

"""Runs all types of tests from one unified interface.

TODO(gkanwar):
* Add options to run Monkey tests.
"""

import collections
import optparse
import os
import sys

from pylib import cmd_helper
from pylib import constants
from pylib import ports
from pylib.base import base_test_result
from pylib.browsertests import dispatch as browsertests_dispatch
from pylib.gtest import dispatch as gtest_dispatch
from pylib.host_driven import run_python_tests as python_dispatch
from pylib.instrumentation import dispatch as instrumentation_dispatch
from pylib.uiautomator import dispatch as uiautomator_dispatch
from pylib.utils import emulator, report_results, run_tests_helper


_SDK_OUT_DIR = os.path.join(constants.DIR_SOURCE_ROOT, 'out')


def AddBuildTypeOption(option_parser):
  """Adds the build type option to |option_parser|."""
  default_build_type = 'Debug'
  if 'BUILDTYPE' in os.environ:
    default_build_type = os.environ['BUILDTYPE']
  option_parser.add_option('--debug', action='store_const', const='Debug',
                           dest='build_type', default=default_build_type,
                           help=('If set, run test suites under out/Debug. '
                                 'Default is env var BUILDTYPE or Debug.'))
  option_parser.add_option('--release', action='store_const',
                           const='Release', dest='build_type',
                           help=('If set, run test suites under out/Release.'
                                 ' Default is env var BUILDTYPE or Debug.'))


def AddEmulatorOptions(option_parser):
  """Adds all emulator-related options to |option_parser|."""

  # TODO(gkanwar): Figure out what we're doing with the emulator setup
  # and determine whether these options should be deprecated/removed.
  option_parser.add_option('-e', '--emulator', dest='use_emulator',
                           action='store_true',
                           help='Run tests in a new instance of emulator.')
  option_parser.add_option('-n', '--emulator-count',
                           type='int', default=1,
                           help=('Number of emulators to launch for '
                                 'running the tests.'))
  option_parser.add_option('--abi', default='armeabi-v7a',
                           help='Platform of emulators to launch.')


def ProcessEmulatorOptions(options):
  """Processes emulator options."""
  if options.use_emulator:
    emulator.DeleteAllTempAVDs()


def AddCommonOptions(option_parser):
  """Adds all common options to |option_parser|."""

  AddBuildTypeOption(option_parser)

  option_parser.add_option('--out-directory', dest='out_directory',
                           help=('Path to the out/ directory, irrespective of '
                                 'the build type. Only for non-Chromium uses.'))
  option_parser.add_option('-c', dest='cleanup_test_files',
                           help='Cleanup test files on the device after run',
                           action='store_true')
  option_parser.add_option('--num_retries', dest='num_retries', type='int',
                           default=2,
                           help=('Number of retries for a test before '
                                 'giving up.'))
  option_parser.add_option('-v',
                           '--verbose',
                           dest='verbose_count',
                           default=0,
                           action='count',
                           help='Verbose level (multiple times for more)')
  profilers = ['devicestatsmonitor', 'chrometrace', 'dumpheap', 'smaps',
               'traceview']
  option_parser.add_option('--profiler', dest='profilers', action='append',
                           choices=profilers,
                           help=('Profiling tool to run during test. Pass '
                                 'multiple times to run multiple profilers. '
                                 'Available profilers: %s' % profilers))
  option_parser.add_option('--tool',
                           dest='tool',
                           help=('Run the test under a tool '
                                 '(use --tool help to list them)'))
  option_parser.add_option('--flakiness-dashboard-server',
                           dest='flakiness_dashboard_server',
                           help=('Address of the server that is hosting the '
                                 'Chrome for Android flakiness dashboard.'))
  option_parser.add_option('--skip-deps-push', dest='push_deps',
                           action='store_false', default=True,
                           help=('Do not push dependencies to the device. '
                                 'Use this at your own risk for speeding up '
                                 'test execution on a local machine.'))
  # TODO(gkanwar): This option is deprecated. Remove it in the future.
  option_parser.add_option('--exit-code', action='store_true',
                           help=('(DEPRECATED) If set, the exit code will be '
                                 'the total number of failures.'))
  # TODO(gkanwar): This option is deprecated. It is currently used to run tests
  # with the FlakyTest annotation to prevent the bots going red downstream. We
  # should instead use exit codes and let the Buildbot scripts deal with test
  # failures appropriately. See crbug.com/170477.
  option_parser.add_option('--buildbot-step-failure',
                           action='store_true',
                           help=('(DEPRECATED) If present, failing tests set '
                                 'the buildbot status to STEP_FAILURE; '
                                 'otherwise they set it to STEP_WARNINGS.'))
  option_parser.add_option('-d', '--device', dest='test_device',
                           help=('Target device for the test suite '
                                 'to run on.'))


def ProcessCommonOptions(options):
  """Processes and handles all common options."""
  if options.out_directory:
    cmd_helper.OutDirectory.set(options.out_directory)
  run_tests_helper.SetLogLevel(options.verbose_count)


def AddContentBrowserTestOptions(option_parser):
  """Adds Content Browser test options to |option_parser|."""

  option_parser.usage = '%prog content_browsertests [options]'
  option_parser.command_list = []
  option_parser.example = '%prog content_browsertests'

  AddCommonOptions(option_parser)

  # TODO(gkanwar): Consolidate and clean up test filtering for gtests and
  # content_browsertests.
  option_parser.add_option('--gtest_filter', dest='test_filter',
                           help='Filter GTests by name.')


def AddGTestOptions(option_parser, default_timeout=60):
  """Adds gtest options to |option_parser|."""

  option_parser.usage = '%prog gtest [options]'
  option_parser.command_list = []
  option_parser.example = '%prog gtest -s base_unittests'

  # TODO(gkanwar): Consolidate and clean up test filtering for gtests and
  # content_browsertests.
  option_parser.add_option('--gtest_filter', dest='test_filter',
                           help='Filter GTests by name.')
  option_parser.add_option('-s', '--suite', dest='test_suite',
                           help=('Executable name of the test suite to run '
                                 '(use -s help to list them).'))
  option_parser.add_option('-a', '--test_arguments', dest='test_arguments',
                           help='Additional arguments to pass to the test.')
  # TODO(gkanwar): Most likely deprecate/remove this option once we've pinned
  # down what we're doing with the emulator setup.
  option_parser.add_option('-x', '--xvfb', dest='use_xvfb',
                           action='store_true',
                           help='Use Xvfb around tests (ignored if not Linux).')
  # TODO(gkanwar): Possibly deprecate this flag. Waiting on word from Peter
  # Beverloo.
  option_parser.add_option('--webkit', action='store_true',
                           help='Run the tests from a WebKit checkout.')
  option_parser.add_option('--exe', action='store_true',
                           help='If set, use the exe test runner instead of '
                                'the APK.')
  option_parser.add_option('-t', dest='timeout',
                           help='Timeout to wait for each test',
                           type='int',
                           default=default_timeout)

  # TODO(gkanwar): Move these to Common Options once we have the plumbing
  # in our other test types to handle these commands.
  AddEmulatorOptions(option_parser)
  AddCommonOptions(option_parser)


def AddJavaTestOptions(option_parser):
  """Adds the Java test options to |option_parser|."""

  option_parser.add_option('-f', '--test_filter', dest='test_filter',
                           help=('Test filter (if not fully qualified, '
                                 'will run all matches).'))
  option_parser.add_option(
      '-A', '--annotation', dest='annotation_str',
      help=('Comma-separated list of annotations. Run only tests with any of '
            'the given annotations. An annotation can be either a key or a '
            'key-values pair. A test that has no annotation is considered '
            '"SmallTest".'))
  option_parser.add_option(
      '-E', '--exclude-annotation', dest='exclude_annotation_str',
      help=('Comma-separated list of annotations. Exclude tests with these '
            'annotations.'))
  option_parser.add_option('-j', '--java_only', action='store_true',
                           default=False, help='Run only the Java tests.')
  option_parser.add_option('-p', '--python_only', action='store_true',
                           default=False,
                           help='Run only the host-driven tests.')
  option_parser.add_option('--screenshot', dest='screenshot_failures',
                           action='store_true',
                           help='Capture screenshots of test failures.')
  option_parser.add_option('--save-perf-json', action='store_true',
                           help='Saves the JSON file for each UI Perf test.')
  # TODO(gkanwar): Remove this option. It is not used anywhere.
  option_parser.add_option('--shard_retries', type=int, default=1,
                           help=('Number of times to retry each failure when '
                                 'sharding.'))
  option_parser.add_option('--official-build',
                           help='Run official build tests.')
  option_parser.add_option('--python_test_root',
                           help='Root of the host-driven tests.')
  option_parser.add_option('--keep_test_server_ports',
                           action='store_true',
                           help=('Indicates the test server ports must be '
                                 'kept. When this is run via a sharder, the '
                                 'test server ports should be kept and '
                                 'should not be reset.'))
  # TODO(gkanwar): This option is deprecated. Remove it in the future.
  option_parser.add_option('--disable_assertions', action='store_true',
                           help=('(DEPRECATED) Run with Java assertions '
                                 'disabled.'))
  option_parser.add_option('--test_data', action='append', default=[],
                           help=('Each instance defines a directory of test '
                                 'data that should be copied to the target(s) '
                                 'before running the tests. The argument '
                                 'should be of the form <target>:<source>, '
                                 'where <target> is relative to the device '
                                 'data directory, and <source> is relative '
                                 'to the chromium build directory.'))


def ProcessJavaTestOptions(options, error_func):
  """Processes options/arguments and populates |options| with defaults."""

  if options.java_only and options.python_only:
    error_func('Options java_only (-j) and python_only (-p) '
               'are mutually exclusive.')
  options.run_java_tests = True
  options.run_python_tests = True
  if options.java_only:
    options.run_python_tests = False
  elif options.python_only:
    options.run_java_tests = False

  if not options.python_test_root:
    options.run_python_tests = False

  if options.annotation_str:
    options.annotations = options.annotation_str.split(',')
  elif options.test_filter:
    options.annotations = []
  else:
    options.annotations = ['Smoke', 'SmallTest', 'MediumTest', 'LargeTest']

  if options.exclude_annotation_str:
    options.exclude_annotations = options.exclude_annotation_str.split(',')
  else:
    options.exclude_annotations = []

  if not options.keep_test_server_ports:
    if not ports.ResetTestServerPortAllocation():
      raise Exception('Failed to reset test server port.')


def AddInstrumentationTestOptions(option_parser):
  """Adds Instrumentation test options to |option_parser|."""

  option_parser.usage = '%prog instrumentation [options]'
  option_parser.command_list = []
  option_parser.example = ('%prog instrumentation -I '
                           '--test-apk=ChromiumTestShellTest')

  AddJavaTestOptions(option_parser)
  AddCommonOptions(option_parser)

  option_parser.add_option('-w', '--wait_debugger', dest='wait_for_debugger',
                           action='store_true',
                           help='Wait for debugger.')
  option_parser.add_option('-I', dest='install_apk', action='store_true',
                           help='Install test APK.')
  option_parser.add_option(
      '--test-apk', dest='test_apk',
      help=('The name of the apk containing the tests '
            '(without the .apk extension; e.g. "ContentShellTest"). '
            'Alternatively, this can be a full path to the apk.'))


def ProcessInstrumentationOptions(options, error_func):
  """Processes options/arguments and populates |options| with defaults."""

  ProcessJavaTestOptions(options, error_func)

  if not options.test_apk:
    error_func('--test-apk must be specified.')

  if os.path.exists(options.test_apk):
    # The APK is fully qualified; assume the JAR lives alongside.
    options.test_apk_path = options.test_apk
    options.test_apk_jar_path = (os.path.splitext(options.test_apk_path)[0] +
                                 '.jar')
  else:
    options.test_apk_path = os.path.join(_SDK_OUT_DIR,
                                         options.build_type,
                                         constants.SDK_BUILD_APKS_DIR,
                                         '%s.apk' % options.test_apk)
    options.test_apk_jar_path = os.path.join(
        _SDK_OUT_DIR, options.build_type, constants.SDK_BUILD_TEST_JAVALIB_DIR,
        '%s.jar' % options.test_apk)


def AddUIAutomatorTestOptions(option_parser):
  """Adds UI Automator test options to |option_parser|."""

  option_parser.usage = '%prog uiautomator [options]'
  option_parser.command_list = []
  option_parser.example = (
      '%prog uiautomator --test-jar=chromium_testshell_uiautomator_tests'
      ' --package-name=org.chromium.chrome.testshell')
  option_parser.add_option(
      '--package-name',
      help='The package name used by the apk containing the application.')
  option_parser.add_option(
      '--test-jar', dest='test_jar',
      help=('The name of the dexed jar containing the tests (without the '
            '.dex.jar extension). Alternatively, this can be a full path '
            'to the jar.'))

  AddJavaTestOptions(option_parser)
  AddCommonOptions(option_parser)


def ProcessUIAutomatorOptions(options, error_func):
  """Processes UIAutomator options/arguments."""

  ProcessJavaTestOptions(options, error_func)

  if not options.package_name:
    error_func('--package-name must be specified.')

  if not options.test_jar:
    error_func('--test-jar must be specified.')

  if os.path.exists(options.test_jar):
    # The dexed JAR is fully qualified; assume the info JAR lives alongside.
    options.uiautomator_jar = options.test_jar
  else:
    options.uiautomator_jar = os.path.join(
        _SDK_OUT_DIR, options.build_type, constants.SDK_BUILD_JAVALIB_DIR,
        '%s.dex.jar' % options.test_jar)
  options.uiautomator_info_jar = (
      options.uiautomator_jar[:options.uiautomator_jar.find('.dex.jar')] +
      '_java.jar')


def RunTestsCommand(command, options, args, option_parser):
  """Checks test type and dispatches to the appropriate function.

  Args:
    command: String indicating the command that was received to trigger
             this function.
    options: optparse.Values instance containing the parsed options.
    args: List of extra args from optparse.
    option_parser: optparse.OptionParser object.

  Returns:
    Integer indicating the exit code.
  """

  ProcessCommonOptions(options)

  total_failed = 0
  if command == 'gtest':
    # TODO(gkanwar): See the emulator TODO above -- this call should either go
    # away or become generalized.
    ProcessEmulatorOptions(options)
    total_failed = gtest_dispatch.Dispatch(options)
  elif command == 'content_browsertests':
    total_failed = browsertests_dispatch.Dispatch(options)
  elif command == 'instrumentation':
    ProcessInstrumentationOptions(options, option_parser.error)
    results = base_test_result.TestRunResults()
    if options.run_java_tests:
      results.AddTestRunResults(instrumentation_dispatch.Dispatch(options))
    if options.run_python_tests:
      results.AddTestRunResults(python_dispatch.DispatchPythonTests(options))
    report_results.LogFull(
        results=results,
        test_type='Instrumentation',
        test_package=os.path.basename(options.test_apk),
        annotation=options.annotations,
        build_type=options.build_type,
        flakiness_server=options.flakiness_dashboard_server)
    total_failed += len(results.GetNotPass())
  elif command == 'uiautomator':
    ProcessUIAutomatorOptions(options, option_parser.error)
    results = base_test_result.TestRunResults()
    if options.run_java_tests:
      results.AddTestRunResults(uiautomator_dispatch.Dispatch(options))
    if options.run_python_tests:
      results.AddTestRunResults(python_dispatch.DispatchPythonTests(options))
    report_results.LogFull(
        results=results,
        test_type='UIAutomator',
        test_package=os.path.basename(options.test_jar),
        annotation=options.annotations,
        build_type=options.build_type,
        flakiness_server=options.flakiness_dashboard_server)
    total_failed += len(results.GetNotPass())
  else:
    raise Exception('Unknown test type state')

  return total_failed


def HelpCommand(command, options, args, option_parser):
  """Displays help for a certain command, or overall help.

  Args:
    command: String indicating the command that was received to trigger
             this function.
    options: optparse.Values instance containing the parsed options.
    args: List of extra args from optparse.
    option_parser: optparse.OptionParser object.

  Returns:
    Integer indicating the exit code.
  """
  # If we don't have any args, display overall help.
  if len(args) < 3:
    option_parser.print_help()
    return 0

  command = args[2]

  if command not in VALID_COMMANDS:
    option_parser.error('Unrecognized command.')

  # Treat the help command as a special case. We don't care about showing a
  # specific help page for it.
  if command == 'help':
    option_parser.print_help()
    return 0

  VALID_COMMANDS[command].add_options_func(option_parser)
  option_parser.usage = '%prog ' + command + ' [options]'
  option_parser.command_list = None
  option_parser.print_help()

  return 0


# Define a named tuple for the values in the VALID_COMMANDS dictionary so the
# syntax is a bit prettier. The tuple is two functions: (add options, run
# command).
CommandFunctionTuple = collections.namedtuple(
    'CommandFunctionTuple', ['add_options_func', 'run_command_func'])
VALID_COMMANDS = {
    'gtest': CommandFunctionTuple(AddGTestOptions, RunTestsCommand),
    'content_browsertests': CommandFunctionTuple(
        AddContentBrowserTestOptions, RunTestsCommand),
    'instrumentation': CommandFunctionTuple(
        AddInstrumentationTestOptions, RunTestsCommand),
    'uiautomator': CommandFunctionTuple(
        AddUIAutomatorTestOptions, RunTestsCommand),
    'help': CommandFunctionTuple(lambda option_parser: None, HelpCommand)
    }
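# A new test type is registered by pairing an Add<Type>Options function (plus
# a Process<Type>Options helper, if the options need post-processing) with a
# dispatch branch in RunTestsCommand and an entry in VALID_COMMANDS above; the
# 'help' entry deliberately installs no extra options and defers to
# HelpCommand.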


class CommandOptionParser(optparse.OptionParser):
  """Wrapper class for OptionParser to help with listing commands."""

  def __init__(self, *args, **kwargs):
    self.command_list = kwargs.pop('command_list', [])
    self.example = kwargs.pop('example', '')
    optparse.OptionParser.__init__(self, *args, **kwargs)

  #override
  def get_usage(self):
    normal_usage = optparse.OptionParser.get_usage(self)
    command_list = self.get_command_list()
    example = self.get_example()
    return self.expand_prog_name(normal_usage + example + command_list)

  def get_command_list(self):
    if self.command_list:
      return '\nCommands:\n %s\n' % '\n '.join(sorted(self.command_list))
    return ''

  def get_example(self):
    if self.example:
      return '\nExample:\n %s\n' % self.example
    return ''


def main(argv):
  option_parser = CommandOptionParser(
      usage='Usage: %prog <command> [options]',
      command_list=VALID_COMMANDS.keys())

  if len(argv) < 2 or argv[1] not in VALID_COMMANDS:
    option_parser.print_help()
    return 0
  command = argv[1]
  VALID_COMMANDS[command].add_options_func(option_parser)
  options, args = option_parser.parse_args(argv)
  exit_code = VALID_COMMANDS[command].run_command_func(
      command, options, args, option_parser)

  # Failures of individual test suites are communicated by printing a
  # STEP_FAILURE message.
  # Returning a success exit status also prevents the buildbot from incorrectly
  # marking the last suite as failed if there were failures in other suites in
  # the batch (this happens because the exit status is a sum of all failures
  # from all suites, but the buildbot associates the exit status only with the
  # most recent step).
  return exit_code


if __name__ == '__main__':
  sys.exit(main(sys.argv))