#!/usr/bin/env python
#
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

"""Runs all types of tests from one unified interface."""
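
# Example invocations (illustrative only; the suite, APK, and file names below
# are placeholders, not a verified list):
#   test_runner.py gtest -s base_unittests --release
#   test_runner.py instrumentation --test-apk ContentShellTest
#   test_runner.py perf --steps perf_steps.json -v
# Run `test_runner.py <command> --help` for the options each command accepts.
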
import argparse
import collections
import logging
import os
import shutil
import signal
import sys
import threading
import unittest

from pylib import constants
from pylib import forwarder
from pylib import ports
from pylib.base import base_test_result
from pylib.base import environment_factory
from pylib.base import test_dispatcher
from pylib.base import test_instance_factory
from pylib.base import test_run_factory
from pylib.device import device_errors
from pylib.device import device_utils
from pylib.gtest import gtest_config
# TODO(jbudorick): Remove this once we stop selectively enabling platform mode.
from pylib.gtest import gtest_test_instance
from pylib.gtest import setup as gtest_setup
from pylib.gtest import test_options as gtest_test_options
from pylib.linker import setup as linker_setup
from pylib.host_driven import setup as host_driven_setup
from pylib.instrumentation import setup as instrumentation_setup
from pylib.instrumentation import test_options as instrumentation_test_options
from pylib.junit import setup as junit_setup
from pylib.junit import test_dispatcher as junit_dispatcher
from pylib.monkey import setup as monkey_setup
from pylib.monkey import test_options as monkey_test_options
from pylib.perf import setup as perf_setup
from pylib.perf import test_options as perf_test_options
from pylib.perf import test_runner as perf_test_runner
from pylib.results import json_results
from pylib.results import report_results
from pylib.uiautomator import setup as uiautomator_setup
from pylib.uiautomator import test_options as uiautomator_test_options
from pylib.utils import apk_helper
from pylib.utils import base_error
from pylib.utils import reraiser_thread
from pylib.utils import run_tests_helper


def AddCommonOptions(parser):
  """Adds all common options to |parser|."""

  group = parser.add_argument_group('Common Options')

  default_build_type = os.environ.get('BUILDTYPE', 'Debug')

  debug_or_release_group = group.add_mutually_exclusive_group()
  debug_or_release_group.add_argument(
      '--debug', action='store_const', const='Debug', dest='build_type',
      default=default_build_type,
      help=('If set, run test suites under out/Debug. '
            'Default is env var BUILDTYPE or Debug.'))
  debug_or_release_group.add_argument(
      '--release', action='store_const', const='Release', dest='build_type',
      help=('If set, run test suites under out/Release. '
            'Default is env var BUILDTYPE or Debug.'))

  group.add_argument('--build-directory', dest='build_directory',
                     help=('Path to the directory in which build files are'
                           ' located (should not include build type)'))
  group.add_argument('--output-directory', dest='output_directory',
                     help=('Path to the directory in which build files are'
                           ' located (must include build type). This will take'
                           ' precedence over --debug, --release and'
                           ' --build-directory'))
  group.add_argument('--num_retries', dest='num_retries', type=int, default=2,
                     help=('Number of retries for a test before '
                           'giving up (default: %(default)s).'))
  group.add_argument('-v',
                     '--verbose',
                     dest='verbose_count',
                     default=0,
                     action='count',
                     help='Verbose level (multiple times for more)')
  group.add_argument('--flakiness-dashboard-server',
                     dest='flakiness_dashboard_server',
                     help=('Address of the server that is hosting the '
                           'Chrome for Android flakiness dashboard.'))
  group.add_argument('--enable-platform-mode', action='store_true',
                     help=('Run the test scripts in platform mode, which '
                           'conceptually separates the test runner from the '
                           '"device" (local or remote, real or emulated) on '
                           'which the tests are running. [experimental]'))
  group.add_argument('-e', '--environment', default='local',
                     choices=constants.VALID_ENVIRONMENTS,
                     help='Test environment to run in (default: %(default)s).')
  group.add_argument('--adb-path',
                     help=('Specify the absolute path of the adb binary that '
                           'should be used.'))
  group.add_argument('--json-results-file', dest='json_results_file',
                     help='If set, will dump results in JSON form '
                          'to specified file.')

def ProcessCommonOptions(args):
  """Processes and handles all common options."""
  run_tests_helper.SetLogLevel(args.verbose_count)
  constants.SetBuildType(args.build_type)
  if args.build_directory:
    constants.SetBuildDirectory(args.build_directory)
  if args.output_directory:
    constants.SetOutputDirectory(args.output_directory)
  if args.adb_path:
    constants.SetAdbPath(args.adb_path)
  # Some things such as Forwarder require ADB to be in the environment path.
  adb_dir = os.path.dirname(constants.GetAdbPath())
  if adb_dir and adb_dir not in os.environ['PATH'].split(os.pathsep):
    os.environ['PATH'] = adb_dir + os.pathsep + os.environ['PATH']


def AddRemoteDeviceOptions(parser):
  group = parser.add_argument_group('Remote Device Options')

  group.add_argument('--trigger',
                     help=('Only triggers the test if set. Stores test_run_id '
                           'in the given file path.'))
  group.add_argument('--collect',
                     help=('Only collects the test results if set. '
                           'Gets test_run_id from the given file path.'))
  group.add_argument('--remote-device', action='append',
                     help='Device type to run test on.')
  group.add_argument('--results-path',
                     help='File path to download results to.')
  group.add_argument('--api-protocol',
                     help='HTTP protocol to use (http or https).')
  group.add_argument('--api-address',
                     help='Address to send HTTP requests to.')
  group.add_argument('--api-port',
                     help='Port to send HTTP requests to.')
  group.add_argument('--runner-type',
                     help='Type of test to run as.')
  group.add_argument('--runner-package',
                     help='Package name of test.')
  group.add_argument('--device-type',
                     choices=constants.VALID_DEVICE_TYPES,
                     help='Type of device to run on (iOS or Android).')
  group.add_argument('--device-oem', action='append',
                     help='Device OEM to run on.')
  group.add_argument('--remote-device-file',
                     help=('File with JSON to select remote device. '
                           'Overrides all other flags.'))
  group.add_argument('--remote-device-timeout', type=int,
                     help='Number of times to retry finding the remote device.')
  group.add_argument('--network-config', type=int,
                     help='Integer that specifies the network environment '
                          'that the tests will be run in.')

  device_os_group = group.add_mutually_exclusive_group()
  device_os_group.add_argument('--remote-device-minimum-os',
                               help='Minimum OS on device.')
  device_os_group.add_argument('--remote-device-os', action='append',
                               help='OS to have on the device.')

  api_secret_group = group.add_mutually_exclusive_group()
  api_secret_group.add_argument('--api-secret', default='',
                                help='API secret for remote devices.')
  api_secret_group.add_argument('--api-secret-file', default='',
                                help='Path to file that contains API secret.')

  api_key_group = group.add_mutually_exclusive_group()
  api_key_group.add_argument('--api-key', default='',
                             help='API key for remote devices.')
  api_key_group.add_argument('--api-key-file', default='',
                             help='Path to file that contains API key.')


def AddDeviceOptions(parser):
  """Adds device options to |parser|."""
  group = parser.add_argument_group(title='Device Options')
  group.add_argument('--tool',
                     dest='tool',
                     help=('Run the test under a tool '
                           '(use --tool help to list them)'))
  group.add_argument('-d', '--device', dest='test_device',
                     help=('Target device for the test suite '
                           'to run on.'))


def AddGTestOptions(parser):
  """Adds gtest options to |parser|."""

  gtest_suites = list(gtest_config.STABLE_TEST_SUITES
                      + gtest_config.EXPERIMENTAL_TEST_SUITES)

  group = parser.add_argument_group('GTest Options')
  group.add_argument('-s', '--suite', dest='suite_name',
                     nargs='+', metavar='SUITE_NAME', required=True,
                     help=('Executable name of the test suite to run. '
                           'Available suites include (but are not limited to): '
                           '%s' % ', '.join('"%s"' % s for s in gtest_suites)))
  group.add_argument('--gtest_also_run_disabled_tests',
                     '--gtest-also-run-disabled-tests',
                     dest='run_disabled', action='store_true',
                     help='Also run disabled tests if applicable.')
  group.add_argument('-a', '--test-arguments', dest='test_arguments',
                     default='',
                     help='Additional arguments to pass to the test.')
  group.add_argument('-t', dest='timeout', type=int, default=60,
                     help='Timeout to wait for each test '
                          '(default: %(default)s).')
  group.add_argument('--isolate_file_path',
                     '--isolate-file-path',
                     dest='isolate_file_path',
                     help='.isolate file path to override the default '
                          'path')
  group.add_argument('--app-data-file', action='append', dest='app_data_files',
                     help='A file path relative to the app data directory '
                          'that should be saved to the host.')
  group.add_argument('--app-data-file-dir',
                     help='Host directory to which app data files will be'
                          ' saved. Used with --app-data-file.')
  group.add_argument('--delete-stale-data', dest='delete_stale_data',
                     action='store_true',
                     help='Delete stale test data on the device.')

  filter_group = group.add_mutually_exclusive_group()
  filter_group.add_argument('-f', '--gtest_filter', '--gtest-filter',
                            dest='test_filter',
                            help='googletest-style filter string.')
  filter_group.add_argument('--gtest-filter-file', dest='test_filter_file',
                            help='Path to file that contains googletest-style '
                                 'filter strings. (Lines will be joined with '
                                 '":" to create a single filter string.)')

  AddDeviceOptions(parser)
  AddCommonOptions(parser)
  AddRemoteDeviceOptions(parser)

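# Illustrative note on --gtest-filter-file: a file with the (hypothetical)
# lines "FooTest.*" and "-FooTest.Flaky" is joined with ':' into the single
# filter string "FooTest.*:-FooTest.Flaky", per the help text above.
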
def AddLinkerTestOptions(parser):
  group = parser.add_argument_group('Linker Test Options')
  group.add_argument('-f', '--gtest-filter', dest='test_filter',
                     help='googletest-style filter string.')
  AddCommonOptions(parser)
  AddDeviceOptions(parser)


def AddJavaTestOptions(argument_group):
  """Adds the Java test options to |argument_group|."""

  argument_group.add_argument(
      '-f', '--test-filter', dest='test_filter',
      help=('Test filter (if not fully qualified, will run all matches).'))
  argument_group.add_argument(
      '-A', '--annotation', dest='annotation_str',
      help=('Comma-separated list of annotations. Run only tests with any of '
            'the given annotations. An annotation can be either a key or a '
            'key-values pair. A test that has no annotation is considered '
            '"SmallTest".'))
  argument_group.add_argument(
      '-E', '--exclude-annotation', dest='exclude_annotation_str',
      help=('Comma-separated list of annotations. Exclude tests with these '
            'annotations.'))
  argument_group.add_argument(
      '--screenshot', dest='screenshot_failures', action='store_true',
      help='Capture screenshots of test failures')
  argument_group.add_argument(
      '--save-perf-json', action='store_true',
      help='Saves the JSON file for each UI Perf test.')
  argument_group.add_argument(
      '--official-build', action='store_true', help='Run official build tests.')
  argument_group.add_argument(
      '--test_data', '--test-data', action='append', default=[],
      help=('Each instance defines a directory of test data that should be '
            'copied to the target(s) before running the tests. The argument '
            'should be of the form <target>:<source>, <target> is relative to '
            'the device data directory, and <source> is relative to the '
            'chromium build directory.'))
  argument_group.add_argument(
      '--disable-dalvik-asserts', dest='set_asserts', action='store_false',
      default=True, help='Removes the dalvik.vm.enableassertions property')


def ProcessJavaTestOptions(args):
  """Processes options/arguments and populates |args| with defaults."""

  # TODO(jbudorick): Handle most of this function in argparse.
  if args.annotation_str:
    args.annotations = args.annotation_str.split(',')
  elif args.test_filter:
    args.annotations = []
  else:
    args.annotations = ['Smoke', 'SmallTest', 'MediumTest', 'LargeTest',
                        'EnormousTest', 'IntegrationTest']

  if args.exclude_annotation_str:
    args.exclude_annotations = args.exclude_annotation_str.split(',')
  else:
    args.exclude_annotations = []

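# Illustrative examples of the defaulting above (not exhaustive):
#   -A Smoke,MediumTest  -> args.annotations == ['Smoke', 'MediumTest']
#   -f SomeTest#testFoo  -> args.annotations == []  (filter given, no -A)
#   (neither flag)       -> the default size-annotation list above
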
def AddInstrumentationTestOptions(parser):
  """Adds Instrumentation test options to |parser|."""

  parser.usage = '%(prog)s [options]'

  group = parser.add_argument_group('Instrumentation Test Options')
  AddJavaTestOptions(group)

  java_or_python_group = group.add_mutually_exclusive_group()
  java_or_python_group.add_argument(
      '-j', '--java-only', action='store_false',
      dest='run_python_tests', default=True, help='Run only the Java tests.')
  java_or_python_group.add_argument(
      '-p', '--python-only', action='store_false',
      dest='run_java_tests', default=True,
      help='Run only the host-driven tests.')

  group.add_argument('--host-driven-root',
                     help='Root of the host-driven tests.')
  group.add_argument('-w', '--wait_debugger', dest='wait_for_debugger',
                     action='store_true',
                     help='Wait for debugger.')
  group.add_argument('--apk-under-test', dest='apk_under_test',
                     help='The name of the apk under test.')
  group.add_argument('--test-apk', dest='test_apk', required=True,
                     help=('The name of the apk containing the tests '
                           '(without the .apk extension; '
                           'e.g. "ContentShellTest").'))
  group.add_argument('--support-apk', dest='test_support_apk_path',
                     help=('The path to an optional support apk to be '
                           'installed alongside the test apk. The '
                           'path should be relative to the output '
                           'directory (--output-directory).'))
  group.add_argument('--coverage-dir',
                     help=('Directory in which to place all generated '
                           'EMMA coverage files.'))
  group.add_argument('--device-flags', dest='device_flags', default='',
                     help='The relative filepath to a file containing '
                          'command-line flags to set on the device')
  group.add_argument('--device-flags-file', default='',
                     help='The relative filepath to a file containing '
                          'command-line flags to set on the device')
  group.add_argument('--isolate_file_path',
                     '--isolate-file-path',
                     dest='isolate_file_path',
                     help='.isolate file path to override the default '
                          'path')
  group.add_argument('--delete-stale-data', dest='delete_stale_data',
                     action='store_true',
                     help='Delete stale test data on the device.')

  AddCommonOptions(parser)
  AddDeviceOptions(parser)
  AddRemoteDeviceOptions(parser)


def ProcessInstrumentationOptions(args):
  """Processes options/arguments and populates |args| with defaults.

  Args:
    args: argparse.Namespace object.

  Returns:
    An InstrumentationOptions named tuple which contains all options relevant
    to instrumentation tests.
  """

  ProcessJavaTestOptions(args)

  if not args.host_driven_root:
    args.run_python_tests = False

  args.test_apk_path = os.path.join(
      constants.GetOutDirectory(),
      constants.SDK_BUILD_APKS_DIR,
      '%s.apk' % args.test_apk)
  args.test_apk_jar_path = os.path.join(
      constants.GetOutDirectory(),
      constants.SDK_BUILD_TEST_JAVALIB_DIR,
      '%s.jar' % args.test_apk)

  args.test_runner = apk_helper.GetInstrumentationName(args.test_apk_path)

  # TODO(jbudorick): Get rid of InstrumentationOptions.
  return instrumentation_test_options.InstrumentationOptions(
      args.tool,
      args.annotations,
      args.exclude_annotations,
      args.test_filter,
      args.test_data,
      args.save_perf_json,
      args.screenshot_failures,
      args.wait_for_debugger,
      args.coverage_dir,
      args.test_apk,
      args.test_apk_path,
      args.test_apk_jar_path,
      args.test_runner,
      args.test_support_apk_path,
      args.device_flags,
      args.isolate_file_path,
      args.set_asserts,
      args.delete_stale_data
  )

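# As a sketch of the path construction above: with --test-apk ContentShellTest,
# the test APK is looked up at
#   <output dir>/<SDK_BUILD_APKS_DIR>/ContentShellTest.apk
# and its jar at
#   <output dir>/<SDK_BUILD_TEST_JAVALIB_DIR>/ContentShellTest.jar
# where <output dir> comes from --output-directory / --debug / --release.
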
def AddUIAutomatorTestOptions(parser):
  """Adds UI Automator test options to |parser|."""

  group = parser.add_argument_group('UIAutomator Test Options')
  AddJavaTestOptions(group)
  group.add_argument(
      '--package', required=True, choices=constants.PACKAGE_INFO.keys(),
      metavar='PACKAGE', help='Package under test.')
  group.add_argument(
      '--test-jar', dest='test_jar', required=True,
      help=('The name of the dexed jar containing the tests (without the '
            '.dex.jar extension). Alternatively, this can be a full path '
            'to the jar.'))

  AddCommonOptions(parser)
  AddDeviceOptions(parser)


def ProcessUIAutomatorOptions(args):
  """Processes UIAutomator options/arguments.

  Args:
    args: argparse.Namespace object.

  Returns:
    A UIAutomatorOptions named tuple which contains all options relevant to
    uiautomator tests.
  """

  ProcessJavaTestOptions(args)

  if os.path.exists(args.test_jar):
    # The dexed JAR is fully qualified; assume the info JAR lives alongside it.
    args.uiautomator_jar = args.test_jar
  else:
    args.uiautomator_jar = os.path.join(
        constants.GetOutDirectory(),
        constants.SDK_BUILD_JAVALIB_DIR,
        '%s.dex.jar' % args.test_jar)
  args.uiautomator_info_jar = (
      args.uiautomator_jar[:args.uiautomator_jar.find('.dex.jar')] +
      '_java.jar')

  return uiautomator_test_options.UIAutomatorOptions(
      args.tool,
      args.annotations,
      args.exclude_annotations,
      args.test_filter,
      args.test_data,
      args.save_perf_json,
      args.screenshot_failures,
      args.uiautomator_jar,
      args.uiautomator_info_jar,
      args.package,
      args.set_asserts)

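# Sketch of the jar resolution above: if --test-jar is not an existing path,
# e.g. --test-jar FooUiTest ("FooUiTest" is a hypothetical name), the dexed jar
# resolves to <output dir>/<SDK_BUILD_JAVALIB_DIR>/FooUiTest.dex.jar and the
# info jar to the matching FooUiTest_java.jar next to it.
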
def AddJUnitTestOptions(parser):
  """Adds junit test options to |parser|."""

  group = parser.add_argument_group('JUnit Test Options')
  group.add_argument(
      '-s', '--test-suite', dest='test_suite', required=True,
      help=('JUnit test suite to run.'))
  group.add_argument(
      '-f', '--test-filter', dest='test_filter',
      help='Filters tests googletest-style.')
  group.add_argument(
      '--package-filter', dest='package_filter',
      help='Filters tests by package.')
  group.add_argument(
      '--runner-filter', dest='runner_filter',
      help='Filters tests by runner class. Must be fully qualified.')
  group.add_argument(
      '--sdk-version', dest='sdk_version', type=int,
      help='The Android SDK version.')
  AddCommonOptions(parser)


def AddMonkeyTestOptions(parser):
  """Adds monkey test options to |parser|."""

  group = parser.add_argument_group('Monkey Test Options')
  group.add_argument(
      '--package', required=True, choices=constants.PACKAGE_INFO.keys(),
      metavar='PACKAGE', help='Package under test.')
  group.add_argument(
      '--event-count', default=10000, type=int,
      help='Number of events to generate (default: %(default)s).')
  group.add_argument(
      '--category', default='',
      help='A comma-separated list of allowed categories.')
  group.add_argument(
      '--throttle', default=100, type=int,
      help='Delay between events (ms) (default: %(default)s).')
  group.add_argument(
      '--seed', type=int,
      help=('Seed value for pseudo-random generator. Same seed value generates '
            'the same sequence of events. Seed is randomized by default.'))
  group.add_argument(
      '--extra-args', default='',
      help=('String of other args to pass to the command verbatim.'))

  AddCommonOptions(parser)
  AddDeviceOptions(parser)

def ProcessMonkeyTestOptions(args):
  """Processes all monkey test options.

  Args:
    args: argparse.Namespace object.

  Returns:
    A MonkeyOptions named tuple which contains all options relevant to
    monkey tests.
  """
  # TODO(jbudorick): Handle this directly in argparse with nargs='+'
  category = args.category
  if category:
    category = args.category.split(',')

  # TODO(jbudorick): Get rid of MonkeyOptions.
  return monkey_test_options.MonkeyOptions(
      args.verbose_count,
      args.package,
      args.event_count,
      category,
      args.throttle,
      args.seed,
      args.extra_args)

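# For illustration: --category c1,c2 (placeholder names) is split into
# ['c1', 'c2'] before being handed to MonkeyOptions, while omitting --category
# passes the empty string through unchanged.
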
def AddUirobotTestOptions(parser):
  """Adds uirobot test options to |parser|."""
  group = parser.add_argument_group('Uirobot Test Options')

  group.add_argument('--app-under-test', required=True,
                     help='APK to run tests on.')
  group.add_argument(
      '--minutes', default=5, type=int,
      help='Number of minutes to run uirobot test [default: %(default)s].')

  AddCommonOptions(parser)
  AddDeviceOptions(parser)
  AddRemoteDeviceOptions(parser)

def AddPerfTestOptions(parser):
  """Adds perf test options to |parser|."""

  group = parser.add_argument_group('Perf Test Options')

  class SingleStepAction(argparse.Action):
    def __call__(self, parser, namespace, values, option_string=None):
      if values and not namespace.single_step:
        parser.error('single step command provided, '
                     'but --single-step not specified.')
      elif namespace.single_step and not values:
        parser.error('--single-step specified, '
                     'but no single step command provided.')
      setattr(namespace, self.dest, values)

  step_group = group.add_mutually_exclusive_group(required=True)
  # TODO(jbudorick): Revise --single-step to use argparse.REMAINDER.
  # This requires removing "--" from client calls.
  step_group.add_argument(
      '--single-step', action='store_true',
      help='Execute the given command with retries, but only print the result '
           'for the "most successful" round.')
  step_group.add_argument(
      '--steps',
      help='JSON file containing the list of commands to run.')
  step_group.add_argument(
      '--print-step',
      help='The name of a previously executed perf step to print.')

  group.add_argument(
      '--output-json-list',
      help='Write a simple list of names from --steps into the given file.')
  group.add_argument(
      '--collect-chartjson-data',
      action='store_true',
      help='Cache the chartjson output from each step for later use.')
  group.add_argument(
      '--output-chartjson-data',
      default='',
      help='Write out chartjson into the given file.')
  group.add_argument(
      '--flaky-steps',
      help=('A JSON file containing steps that are flaky '
            'and whose exit codes will be ignored.'))
  group.add_argument(
      '--no-timeout', action='store_true',
      help=('Do not impose a timeout. Each perf step is responsible for '
            'implementing the timeout logic.'))
  group.add_argument(
      '-f', '--test-filter',
      help=('Test filter (will match against the names listed in --steps).'))
  group.add_argument(
      '--dry-run', action='store_true',
      help='Just print the steps without executing.')
  # Uses 0.1 degrees C because that's what Android does.
  group.add_argument(
      '--max-battery-temp', type=int,
      help='Only start tests when the battery is at or below the given '
           'temperature (0.1 C).')
  group.add_argument('single_step_command', nargs='*', action=SingleStepAction,
                     help='If --single-step is specified, the command to run.')
  group.add_argument('--min-battery-level', type=int,
                     help='Only start tests when the battery is charged above '
                          'the given level.')
  AddCommonOptions(parser)
  AddDeviceOptions(parser)

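# Illustrative perf invocations based on the options above (file and step
# names are placeholders):
#   test_runner.py perf --steps perf_steps.json --output-json-list names.json
#   test_runner.py perf --print-step some_previously_run_step
#   test_runner.py perf --single-step -- <command to run with retries>
# Note the "--" before the single-step command, per the TODO above about
# moving to argparse.REMAINDER.
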
def ProcessPerfTestOptions(args):
  """Processes all perf test options.

  Args:
    args: argparse.Namespace object.

  Returns:
    A PerfOptions named tuple which contains all options relevant to
    perf tests.
  """
  # TODO(jbudorick): Move single_step handling down into the perf tests.
  if args.single_step:
    args.single_step = ' '.join(args.single_step_command)
  # TODO(jbudorick): Get rid of PerfOptions.
  return perf_test_options.PerfOptions(
      args.steps, args.flaky_steps, args.output_json_list,
      args.print_step, args.no_timeout, args.test_filter,
      args.dry_run, args.single_step, args.collect_chartjson_data,
      args.output_chartjson_data, args.max_battery_temp, args.min_battery_level)


def AddPythonTestOptions(parser):
  group = parser.add_argument_group('Python Test Options')
  group.add_argument(
      '-s', '--suite', dest='suite_name', metavar='SUITE_NAME',
      choices=constants.PYTHON_UNIT_TEST_SUITES.keys(),
      help='Name of the test suite to run.')
  AddCommonOptions(parser)


def _RunGTests(args, devices):
  """Subcommand of RunTestsCommands which runs gtests."""
  exit_code = 0
  for suite_name in args.suite_name:
    # TODO(jbudorick): Either deprecate multi-suite or move its handling down
    # into the gtest code.
    gtest_options = gtest_test_options.GTestOptions(
        args.tool,
        args.test_filter,
        args.run_disabled,
        args.test_arguments,
        args.timeout,
        args.isolate_file_path,
        suite_name,
        args.app_data_files,
        args.app_data_file_dir,
        args.delete_stale_data)
    runner_factory, tests = gtest_setup.Setup(gtest_options, devices)

    results, test_exit_code = test_dispatcher.RunTests(
        tests, runner_factory, devices, shard=True, test_timeout=None,
        num_retries=args.num_retries)

    if test_exit_code and exit_code != constants.ERROR_EXIT_CODE:
      exit_code = test_exit_code

    report_results.LogFull(
        results=results,
        test_type='Unit test',
        test_package=suite_name,
        flakiness_server=args.flakiness_dashboard_server)

    if args.json_results_file:
      json_results.GenerateJsonResultsFile(results, args.json_results_file)

  return exit_code


def _RunLinkerTests(args, devices):
  """Subcommand of RunTestsCommands which runs linker tests."""
  runner_factory, tests = linker_setup.Setup(args, devices)

  results, exit_code = test_dispatcher.RunTests(
      tests, runner_factory, devices, shard=True, test_timeout=60,
      num_retries=args.num_retries)

  report_results.LogFull(
      results=results,
      test_type='Linker test',
      test_package='ChromiumLinkerTest')

  if args.json_results_file:
    json_results.GenerateJsonResultsFile(results, args.json_results_file)

  return exit_code


def _RunInstrumentationTests(args, devices):
  """Subcommand of RunTestsCommands which runs instrumentation tests."""
  logging.info('_RunInstrumentationTests(%s, %s)' % (str(args), str(devices)))

  instrumentation_options = ProcessInstrumentationOptions(args)

  if len(devices) > 1 and args.wait_for_debugger:
    logging.warning('Debugger cannot be sharded; using first available device')
    devices = devices[:1]

  results = base_test_result.TestRunResults()
  exit_code = 0

  if args.run_java_tests:
    runner_factory, tests = instrumentation_setup.Setup(
        instrumentation_options, devices)

    test_results, exit_code = test_dispatcher.RunTests(
        tests, runner_factory, devices, shard=True, test_timeout=None,
        num_retries=args.num_retries)

    results.AddTestRunResults(test_results)

  if args.run_python_tests:
    runner_factory, tests = host_driven_setup.InstrumentationSetup(
        args.host_driven_root, args.official_build,
        instrumentation_options)

    if tests:
      test_results, test_exit_code = test_dispatcher.RunTests(
          tests, runner_factory, devices, shard=True, test_timeout=None,
          num_retries=args.num_retries)

      results.AddTestRunResults(test_results)

      # Only allow exit code escalation.
      if test_exit_code and exit_code != constants.ERROR_EXIT_CODE:
        exit_code = test_exit_code

  if args.device_flags:
    args.device_flags = os.path.join(constants.DIR_SOURCE_ROOT,
                                     args.device_flags)

  report_results.LogFull(
      results=results,
      test_type='Instrumentation',
      test_package=os.path.basename(args.test_apk),
      annotation=args.annotations,
      flakiness_server=args.flakiness_dashboard_server)

  if args.json_results_file:
    json_results.GenerateJsonResultsFile(results, args.json_results_file)

  return exit_code


def _RunUIAutomatorTests(args, devices):
  """Subcommand of RunTestsCommands which runs uiautomator tests."""
  uiautomator_options = ProcessUIAutomatorOptions(args)

  runner_factory, tests = uiautomator_setup.Setup(uiautomator_options)

  results, exit_code = test_dispatcher.RunTests(
      tests, runner_factory, devices, shard=True, test_timeout=None,
      num_retries=args.num_retries)

  report_results.LogFull(
      results=results,
      test_type='UIAutomator',
      test_package=os.path.basename(args.test_jar),
      annotation=args.annotations,
      flakiness_server=args.flakiness_dashboard_server)

  if args.json_results_file:
    json_results.GenerateJsonResultsFile(results, args.json_results_file)

  return exit_code


def _RunJUnitTests(args):
  """Subcommand of RunTestsCommand which runs junit tests."""
  runner_factory, tests = junit_setup.Setup(args)
  results, exit_code = junit_dispatcher.RunTests(tests, runner_factory)

  report_results.LogFull(
      results=results,
      test_type='JUnit',
      test_package=args.test_suite)

  if args.json_results_file:
    json_results.GenerateJsonResultsFile(results, args.json_results_file)

  return exit_code


def _RunMonkeyTests(args, devices):
  """Subcommand of RunTestsCommands which runs monkey tests."""
  monkey_options = ProcessMonkeyTestOptions(args)

  runner_factory, tests = monkey_setup.Setup(monkey_options)

  results, exit_code = test_dispatcher.RunTests(
      tests, runner_factory, devices, shard=False, test_timeout=None,
      num_retries=args.num_retries)

  report_results.LogFull(
      results=results,
      test_type='Monkey',
      test_package='Monkey')

  if args.json_results_file:
    json_results.GenerateJsonResultsFile(results, args.json_results_file)

  return exit_code


def _RunPerfTests(args):
  """Subcommand of RunTestsCommands which runs perf tests."""
  perf_options = ProcessPerfTestOptions(args)

  # Just save a simple json with a list of test names.
  if perf_options.output_json_list:
    return perf_test_runner.OutputJsonList(
        perf_options.steps, perf_options.output_json_list)

  # Just print the results from a single previously executed step.
  if perf_options.print_step:
    return perf_test_runner.PrintTestOutput(
        perf_options.print_step, perf_options.output_chartjson_data)

  runner_factory, tests, devices = perf_setup.Setup(perf_options)

  # shard=False means that each device will get the full list of tests
  # and then each one will decide its own affinity.
  # shard=True means each device will pop the next test available from a queue,
  # which increases throughput but has no affinity.
  results, _ = test_dispatcher.RunTests(
      tests, runner_factory, devices, shard=False, test_timeout=None,
      num_retries=args.num_retries)

  report_results.LogFull(
      results=results,
      test_type='Perf',
      test_package='Perf')

  if args.json_results_file:
    json_results.GenerateJsonResultsFile(results, args.json_results_file)

  if perf_options.single_step:
    return perf_test_runner.PrintTestOutput('single_step')

  perf_test_runner.PrintSummary(tests)

  # Always return 0 on the sharding stage. Individual tests' exit codes
  # will be returned on the print_step stage.
  return 0


def _RunPythonTests(args):
  """Subcommand of RunTestsCommand which runs python unit tests."""
  suite_vars = constants.PYTHON_UNIT_TEST_SUITES[args.suite_name]
  suite_path = suite_vars['path']
  suite_test_modules = suite_vars['test_modules']

  sys.path = [suite_path] + sys.path
  try:
    suite = unittest.TestSuite()
    suite.addTests(unittest.defaultTestLoader.loadTestsFromName(m)
                   for m in suite_test_modules)
    runner = unittest.TextTestRunner(verbosity=1+args.verbose_count)
    return 0 if runner.run(suite).wasSuccessful() else 1
  finally:
    sys.path = sys.path[1:]


def _GetAttachedDevices(test_device=None):
  """Get all attached devices.

  Args:
    test_device: Name of a specific device to use.

  Returns:
    A list of attached devices.
  """
  attached_devices = device_utils.DeviceUtils.HealthyDevices()
  if test_device:
    test_device = [d for d in attached_devices if d == test_device]
    if not test_device:
      raise device_errors.DeviceUnreachableError(
          'Did not find device %s among attached devices. Attached devices: %s'
          % (test_device, ', '.join(attached_devices)))
    return test_device

  else:
    if not attached_devices:
      raise device_errors.NoDevicesError()
    return sorted(attached_devices)

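# For example, `-d 0123456789abcdef` (a placeholder serial) restricts the run
# to that attached device; without -d, all healthy attached devices are used.
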
def RunTestsCommand(args, parser):
  """Checks test type and dispatches to the appropriate function.

  Args:
    args: argparse.Namespace object.
    parser: argparse.ArgumentParser object.

  Returns:
    Integer indicating exit code.

  Raises:
    Exception: Unknown command name passed in, or an exception from an
      individual test runner.
  """
  command = args.command

  ProcessCommonOptions(args)

  if args.enable_platform_mode:
    return RunTestsInPlatformMode(args, parser)

  if command in constants.LOCAL_MACHINE_TESTS:
    devices = []
  else:
    devices = _GetAttachedDevices(args.test_device)

  forwarder.Forwarder.RemoveHostLog()
  if not ports.ResetTestServerPortAllocation():
    raise Exception('Failed to reset test server port.')

  if command == 'gtest':
    if args.suite_name[0] in gtest_test_instance.BROWSER_TEST_SUITES:
      return RunTestsInPlatformMode(args, parser)
    return _RunGTests(args, devices)
  elif command == 'linker':
    return _RunLinkerTests(args, devices)
  elif command == 'instrumentation':
    return _RunInstrumentationTests(args, devices)
  elif command == 'uiautomator':
    return _RunUIAutomatorTests(args, devices)
  elif command == 'junit':
    return _RunJUnitTests(args)
  elif command == 'monkey':
    return _RunMonkeyTests(args, devices)
  elif command == 'perf':
    return _RunPerfTests(args)
  elif command == 'python':
    return _RunPythonTests(args)
  else:
    raise Exception('Unknown test type.')


_SUPPORTED_IN_PLATFORM_MODE = [
  # TODO(jbudorick): Add support for more test types.
  'gtest',
  'instrumentation',
  'uirobot',
]


def RunTestsInPlatformMode(args, parser):

  if args.command not in _SUPPORTED_IN_PLATFORM_MODE:
    parser.error('%s is not yet supported in platform mode' % args.command)

  with environment_factory.CreateEnvironment(args, parser.error) as env:
    with test_instance_factory.CreateTestInstance(args, parser.error) as test:
      with test_run_factory.CreateTestRun(
          args, env, test, parser.error) as test_run:
        results = test_run.RunTests()

        if args.environment == 'remote_device' and args.trigger:
          return 0  # Not returning results, only triggering.

        report_results.LogFull(
            results=results,
            test_type=test.TestType(),
            test_package=test_run.TestPackage(),
            annotation=getattr(args, 'annotations', None),
            flakiness_server=getattr(args, 'flakiness_dashboard_server', None))

        if args.json_results_file:
          json_results.GenerateJsonResultsFile(
              results, args.json_results_file)

  return 0 if results.DidRunPass() else constants.ERROR_EXIT_CODE


CommandConfigTuple = collections.namedtuple(
    'CommandConfigTuple',
    ['add_options_func', 'help_txt'])
VALID_COMMANDS = {
    'gtest': CommandConfigTuple(
        AddGTestOptions,
        'googletest-based C++ tests'),
    'instrumentation': CommandConfigTuple(
        AddInstrumentationTestOptions,
        'InstrumentationTestCase-based Java tests'),
    'uiautomator': CommandConfigTuple(
        AddUIAutomatorTestOptions,
        "Tests that run via Android's uiautomator command"),
    'junit': CommandConfigTuple(
        AddJUnitTestOptions,
        'JUnit4-based Java tests'),
    'monkey': CommandConfigTuple(
        AddMonkeyTestOptions,
        "Tests based on Android's monkey"),
    'perf': CommandConfigTuple(
        AddPerfTestOptions,
        'Performance tests'),
    'python': CommandConfigTuple(
        AddPythonTestOptions,
        'Python tests based on unittest.TestCase'),
    'linker': CommandConfigTuple(
        AddLinkerTestOptions,
        'Linker tests'),
    'uirobot': CommandConfigTuple(
        AddUirobotTestOptions,
        'Uirobot test'),
}


def DumpThreadStacks(_signal, _frame):
  for thread in threading.enumerate():
    reraiser_thread.LogThreadStack(thread)

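# Because of the signal handler installed in main() below, sending SIGUSR1 to
# a running test_runner.py process (e.g. `kill -USR1 <pid>`) logs the stacks
# of all Python threads, which can help diagnose hangs.
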
def main():
  signal.signal(signal.SIGUSR1, DumpThreadStacks)

  parser = argparse.ArgumentParser()
  command_parsers = parser.add_subparsers(title='test types',
                                          dest='command')

  for test_type, config in sorted(VALID_COMMANDS.iteritems(),
                                  key=lambda x: x[0]):
    subparser = command_parsers.add_parser(
        test_type, usage='%(prog)s [options]', help=config.help_txt)
    config.add_options_func(subparser)

  args = parser.parse_args()

  try:
    return RunTestsCommand(args, parser)
  except base_error.BaseError as e:
    logging.exception('Error occurred.')
    if e.is_infra_error:
      return constants.INFRA_EXIT_CODE
    return constants.ERROR_EXIT_CODE
  except:  # pylint: disable=W0702
    logging.exception('Unrecognized error occurred.')
    return constants.ERROR_EXIT_CODE


if __name__ == '__main__':
  sys.exit(main())