Chromium Code Reviews

| Index: build/android/test_runner.py |
| diff --git a/build/android/test_runner.py b/build/android/test_runner.py |
| index f38b76e6af8a64d9a5da93c06b84cf2862f454a9..7fb267a614bf3ebdc2253181f23fc4018f093e83 100755 |
| --- a/build/android/test_runner.py |
| +++ b/build/android/test_runner.py |
| @@ -6,9 +6,9 @@ |
| """Runs all types of tests from one unified interface.""" |
| +import argparse |
| import collections |
| import logging |
| -import optparse |
| import os |
| import shutil |
| import signal |
| @@ -44,189 +44,163 @@ from pylib.results import report_results |
| from pylib.uiautomator import setup as uiautomator_setup |
| from pylib.uiautomator import test_options as uiautomator_test_options |
| from pylib.utils import apk_helper |
| -from pylib.utils import command_option_parser |
| from pylib.utils import reraiser_thread |
| from pylib.utils import run_tests_helper |
| -def AddCommonOptions(option_parser): |
| - """Adds all common options to |option_parser|.""" |
| +def AddCommonOptions(parser): |
| + """Adds all common options to |parser|.""" |
| + |
| + group = parser.add_argument_group('Common Options') |
| - group = optparse.OptionGroup(option_parser, 'Common Options') |
| default_build_type = os.environ.get('BUILDTYPE', 'Debug') |
| - group.add_option('--debug', action='store_const', const='Debug', |
| - dest='build_type', default=default_build_type, |
| - help=('If set, run test suites under out/Debug. ' |
| - 'Default is env var BUILDTYPE or Debug.')) |
| - group.add_option('--release', action='store_const', |
| - const='Release', dest='build_type', |
| - help=('If set, run test suites under out/Release.' |
| - ' Default is env var BUILDTYPE or Debug.')) |
| - group.add_option('--build-directory', dest='build_directory', |
| - help=('Path to the directory in which build files are' |
| - ' located (should not include build type)')) |
| - group.add_option('--output-directory', dest='output_directory', |
| - help=('Path to the directory in which build files are' |
| - ' located (must include build type). This will take' |
| - ' precedence over --debug, --release and' |
| - ' --build-directory')) |
| - group.add_option('--num_retries', dest='num_retries', type='int', |
| - default=2, |
| - help=('Number of retries for a test before ' |
| - 'giving up.')) |
| - group.add_option('-v', |
| - '--verbose', |
| - dest='verbose_count', |
| - default=0, |
| - action='count', |
| - help='Verbose level (multiple times for more)') |
| - group.add_option('--flakiness-dashboard-server', |
| - dest='flakiness_dashboard_server', |
| - help=('Address of the server that is hosting the ' |
| - 'Chrome for Android flakiness dashboard.')) |
| - group.add_option('--enable-platform-mode', action='store_true', |
| - help=('Run the test scripts in platform mode, which ' |
| - 'conceptually separates the test runner from the ' |
| - '"device" (local or remote, real or emulated) on ' |
| - 'which the tests are running. [experimental]')) |
| - group.add_option('-e', '--environment', default='local', |
| - help=('Test environment to run in. Must be one of: %s' % |
| - ', '.join(constants.VALID_ENVIRONMENTS))) |
| - group.add_option('--adb-path', |
| - help=('Specify the absolute path of the adb binary that ' |
| - 'should be used.')) |
| - group.add_option('--json-results-file', dest='json_results_file', |
| - help='If set, will dump results in JSON format ' |
| - 'to specified file.') |
| - option_parser.add_option_group(group) |
| - |
| - |
| -def ProcessCommonOptions(options, error_func): |
| + |
| + debug_or_release_group = group.add_mutually_exclusive_group() |
| + debug_or_release_group.add_argument( |
| + '--debug', action='store_const', const='Debug', dest='build_type', |
| + default=default_build_type, |
| + help=('If set, run test suites under out/Debug. ' |
| + 'Default is env var BUILDTYPE or Debug.')) |
| + debug_or_release_group.add_argument( |
| + '--release', action='store_const', const='Release', dest='build_type', |
| + help=('If set, run test suites under out/Release. ' |
| + 'Default is env var BUILDTYPE or Debug.')) |
| + |
| + group.add_argument('--build-directory', dest='build_directory', |
| + help=('Path to the directory in which build files are' |
| + ' located (should not include build type)')) |
| + group.add_argument('--output-directory', dest='output_directory', |
| + help=('Path to the directory in which build files are' |
| + ' located (must include build type). This will take' |
| + ' precedence over --debug, --release and' |
| + ' --build-directory')) |
| + group.add_argument('--num_retries', dest='num_retries', type=int, default=2, |
| + help=('Number of retries for a test before ' |
| + 'giving up (default: %(default)s).')) |
| + group.add_argument('-v', |
| + '--verbose', |
| + dest='verbose_count', |
| + default=0, |
| + action='count', |
| + help='Verbose level (multiple times for more)') |
| + group.add_argument('--flakiness-dashboard-server', |
| + dest='flakiness_dashboard_server', |
| + help=('Address of the server that is hosting the ' |
| + 'Chrome for Android flakiness dashboard.')) |
| + group.add_argument('--enable-platform-mode', action='store_true', |
| + help=('Run the test scripts in platform mode, which ' |
| + 'conceptually separates the test runner from the ' |
| + '"device" (local or remote, real or emulated) on ' |
| + 'which the tests are running. [experimental]')) |
| + group.add_argument('-e', '--environment', default='local', |
| + choices=constants.VALID_ENVIRONMENTS, |
| + help='Test environment to run in (default: %(default)s).') |
| + group.add_argument('--adb-path', |
| + help=('Specify the absolute path of the adb binary that ' |
| + 'should be used.')) |
| + group.add_argument('--json-results-file', dest='json_results_file', |
| + help='If set, will dump results in JSON form ' |
| + 'to specified file.') |
| + |
| + |
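Editor's note: the --debug/--release pair above is folded into an argparse mutually exclusive group, so conflicting flags are rejected by the parser itself instead of by hand-rolled checks later on. A minimal, standalone sketch of the same pattern (defaults and values here are illustrative, not taken from the CL):

  import argparse

  parser = argparse.ArgumentParser()
  group = parser.add_mutually_exclusive_group()
  # Both flags write to the same dest; passing both makes argparse error out.
  group.add_argument('--debug', action='store_const', const='Debug',
                     dest='build_type', default='Debug')
  group.add_argument('--release', action='store_const', const='Release',
                     dest='build_type')

  args = parser.parse_args(['--release'])
  print(args.build_type)  # Release
  # parser.parse_args(['--debug', '--release']) exits with
  # "argument --release: not allowed with argument --debug".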
| +def ProcessCommonOptions(args): |
| """Processes and handles all common options.""" |
| - run_tests_helper.SetLogLevel(options.verbose_count) |
| - constants.SetBuildType(options.build_type) |
| - if options.build_directory: |
| - constants.SetBuildDirectory(options.build_directory) |
| - if options.output_directory: |
| - constants.SetOutputDirectort(options.output_directory) |
| - if options.adb_path: |
| - constants.SetAdbPath(options.adb_path) |
| + run_tests_helper.SetLogLevel(args.verbose_count) |
| + constants.SetBuildType(args.build_type) |
| + if args.build_directory: |
| + constants.SetBuildDirectory(args.build_directory) |
| + if args.output_directory: |
| + constants.SetOutputDirectort(args.output_directory) |
| + if args.adb_path: |
| + constants.SetAdbPath(args.adb_path) |
| # Some things such as Forwarder require ADB to be in the environment path. |
| adb_dir = os.path.dirname(constants.GetAdbPath()) |
| if adb_dir and adb_dir not in os.environ['PATH'].split(os.pathsep): |
| os.environ['PATH'] = adb_dir + os.pathsep + os.environ['PATH'] |
| - if options.environment not in constants.VALID_ENVIRONMENTS: |
| - error_func('--environment must be one of: %s' % |
| - ', '.join(constants.VALID_ENVIRONMENTS)) |
| - |
| - |
| -def AddDeviceOptions(option_parser): |
| - group = optparse.OptionGroup(option_parser, 'Device Options') |
| - group.add_option('-c', dest='cleanup_test_files', |
| - help='Cleanup test files on the device after run', |
| - action='store_true') |
| - group.add_option('--tool', |
| - dest='tool', |
| - help=('Run the test under a tool ' |
| - '(use --tool help to list them)')) |
| - group.add_option('-d', '--device', dest='test_device', |
| - help=('Target device for the test suite ' |
| - 'to run on.')) |
| - option_parser.add_option_group(group) |
| - |
| - |
| -def AddGTestOptions(option_parser): |
| - """Adds gtest options to |option_parser|.""" |
| - |
| - option_parser.usage = '%prog gtest [options]' |
| - option_parser.commands_dict = {} |
| - option_parser.example = '%prog gtest -s base_unittests' |
| - |
| - # TODO(gkanwar): Make this option required |
| - option_parser.add_option('-s', '--suite', dest='suite_name', |
| - help=('Executable name of the test suite to run ' |
| - '(use -s help to list them).')) |
| - option_parser.add_option('-f', '--gtest_filter', '--gtest-filter', |
| - dest='test_filter', |
| - help='googletest-style filter string.') |
| - option_parser.add_option('--gtest_also_run_disabled_tests', |
| - '--gtest-also-run-disabled-tests', |
| - dest='run_disabled', action='store_true', |
| - help='Also run disabled tests if applicable.') |
| - option_parser.add_option('-a', '--test-arguments', dest='test_arguments', |
| - default='', |
| - help='Additional arguments to pass to the test.') |
| - option_parser.add_option('-t', dest='timeout', |
| - help='Timeout to wait for each test', |
| - type='int', |
| - default=60) |
| - option_parser.add_option('--isolate_file_path', |
| - '--isolate-file-path', |
| - dest='isolate_file_path', |
| - help='.isolate file path to override the default ' |
| - 'path') |
| - |
| - AddCommonOptions(option_parser) |
| - AddDeviceOptions(option_parser) |
| - |
| - |
| -def AddLinkerTestOptions(option_parser): |
| - option_parser.usage = '%prog linker' |
| - option_parser.commands_dict = {} |
| - option_parser.example = '%prog linker' |
| - |
| - option_parser.add_option('-f', '--gtest-filter', dest='test_filter', |
| - help='googletest-style filter string.') |
| - AddCommonOptions(option_parser) |
| - AddDeviceOptions(option_parser) |
| - |
| - |
| -def ProcessGTestOptions(options): |
| - """Intercept test suite help to list test suites. |
| - |
| - Args: |
| - options: Command line options. |
| - """ |
| - if options.suite_name == 'help': |
| - print 'Available test suites are:' |
| - for test_suite in (gtest_config.STABLE_TEST_SUITES + |
| - gtest_config.EXPERIMENTAL_TEST_SUITES): |
| - print test_suite |
| - sys.exit(0) |
| - |
| - # Convert to a list, assuming all test suites if nothing was specified. |
| - # TODO(gkanwar): Require having a test suite |
| - if options.suite_name: |
| - options.suite_name = [options.suite_name] |
| - else: |
| - options.suite_name = [s for s in gtest_config.STABLE_TEST_SUITES] |
| -def AddJavaTestOptions(option_parser): |
| +def AddDeviceOptions(parser): |
| + """Adds device options to |parser|.""" |
| + group = parser.add_argument_group(title='Device Options') |
| + group.add_argument('-c', dest='cleanup_test_files', |
| + help='Cleanup test files on the device after run', |
| + action='store_true') |
| + group.add_argument('--tool', |
| + dest='tool', |
| + help=('Run the test under a tool ' |
| + '(use --tool help to list them)')) |
| + group.add_argument('-d', '--device', dest='test_device', |
| + help=('Target device for the test suite ' |
| + 'to run on.')) |
| + |
| + |
| +def AddGTestOptions(parser): |
| + """Adds gtest options to |parser|.""" |
| + |
| + gtest_suites = list(gtest_config.STABLE_TEST_SUITES |
| + + gtest_config.EXPERIMENTAL_TEST_SUITES) |
| + |
| + group = parser.add_argument_group('GTest Options') |
| + group.add_argument('-s', '--suite', dest='suite_name', |
| + nargs='+', metavar='SUITE_NAME', required=True, |
| + help=('Executable name of the test suite to run. ' |
| + 'Available suites include (but are not limited to): ' |
| + '%s' % ', '.join('"%s"' % s for s in gtest_suites))) |
|
perezju 2014/12/03 17:36:21
Not sure if we should list them here or, as in the …
jbudorick 2014/12/03 17:40:22
I don't like "-s help" very much. A user who wants …
klundberg 2014/12/03 17:50:49
My preference would be to avoid having the user ru…
|
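Editor's note: the thread above is about where suite names get surfaced. In the argparse version, -s is required and the known suites are embedded in its help string, so "test_runner.py gtest --help" lists them and a bare invocation fails with a usage error, with no magic "-s help" value. A rough standalone sketch of that behaviour (suite names below are examples only):

  import argparse

  SUITES = ['base_unittests', 'net_unittests']  # illustrative names only

  parser = argparse.ArgumentParser(prog='test_runner.py gtest')
  parser.add_argument('-s', '--suite', dest='suite_name', nargs='+',
                      metavar='SUITE_NAME', required=True,
                      help='Test suite(s) to run. Known suites: %s'
                           % ', '.join(SUITES))

  # Running with no -s exits with a usage error; --help prints the suite list.
  args = parser.parse_args(['-s', 'base_unittests', 'net_unittests'])
  print(args.suite_name)  # ['base_unittests', 'net_unittests']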
| + group.add_argument('-f', '--gtest_filter', '--gtest-filter', |
| + dest='test_filter', |
| + help='googletest-style filter string.') |
| + group.add_argument('--gtest_also_run_disabled_tests', |
| + '--gtest-also-run-disabled-tests', |
| + dest='run_disabled', action='store_true', |
| + help='Also run disabled tests if applicable.') |
| + group.add_argument('-a', '--test-arguments', dest='test_arguments', |
| + default='', |
| + help='Additional arguments to pass to the test.') |
| + group.add_argument('-t', dest='timeout', type=int, default=60, |
| + help='Timeout to wait for each test ' |
| + '(default: %(default)s).') |
| + group.add_argument('--isolate_file_path', |
| + '--isolate-file-path', |
| + dest='isolate_file_path', |
| + help='.isolate file path to override the default ' |
| + 'path') |
| + AddDeviceOptions(parser) |
| + AddCommonOptions(parser) |
| + |
| + |
| +def AddLinkerTestOptions(parser): |
| + group = parser.add_argument_group('Linker Test Options') |
| + group.add_argument('-f', '--gtest-filter', dest='test_filter', |
| + help='googletest-style filter string.') |
| + AddCommonOptions(parser) |
| + AddDeviceOptions(parser) |
| + |
| + |
| +def AddJavaTestOptions(argument_group): |
| """Adds the Java test options to |option_parser|.""" |
| - option_parser.add_option('-f', '--test-filter', dest='test_filter', |
| - help=('Test filter (if not fully qualified, ' |
| - 'will run all matches).')) |
| - option_parser.add_option( |
| + argument_group.add_argument( |
| + '-f', '--test-filter', dest='test_filter', |
| + help=('Test filter (if not fully qualified, will run all matches).')) |
| + argument_group.add_argument( |
| '-A', '--annotation', dest='annotation_str', |
| help=('Comma-separated list of annotations. Run only tests with any of ' |
| 'the given annotations. An annotation can be either a key or a ' |
| 'key-values pair. A test that has no annotation is considered ' |
| '"SmallTest".')) |
| - option_parser.add_option( |
| + argument_group.add_argument( |
| '-E', '--exclude-annotation', dest='exclude_annotation_str', |
| help=('Comma-separated list of annotations. Exclude tests with these ' |
| 'annotations.')) |
| - option_parser.add_option( |
| + argument_group.add_argument( |
| '--screenshot', dest='screenshot_failures', action='store_true', |
| help='Capture screenshots of test failures') |
| - option_parser.add_option( |
| + argument_group.add_argument( |
| '--save-perf-json', action='store_true', |
| help='Saves the JSON file for each UI Perf test.') |
| - option_parser.add_option( |
| + argument_group.add_argument( |
| '--official-build', action='store_true', help='Run official build tests.') |
| - option_parser.add_option( |
| + argument_group.add_argument( |
| '--test_data', '--test-data', action='append', default=[], |
| help=('Each instance defines a directory of test data that should be ' |
| 'copied to the target(s) before running the tests. The argument ' |
| @@ -235,411 +209,360 @@ def AddJavaTestOptions(option_parser): |
| 'chromium build directory.')) |
| -def ProcessJavaTestOptions(options): |
| +def ProcessJavaTestOptions(args): |
| """Processes options/arguments and populates |options| with defaults.""" |
| - if options.annotation_str: |
| - options.annotations = options.annotation_str.split(',') |
| - elif options.test_filter: |
| - options.annotations = [] |
| + # TODO(jbudorick): Handle most of this function in argparse. |
| + if args.annotation_str: |
| + args.annotations = args.annotation_str.split(',') |
| + elif args.test_filter: |
| + args.annotations = [] |
| else: |
| - options.annotations = ['Smoke', 'SmallTest', 'MediumTest', 'LargeTest', |
| - 'EnormousTest', 'IntegrationTest'] |
| + args.annotations = ['Smoke', 'SmallTest', 'MediumTest', 'LargeTest', |
| + 'EnormousTest', 'IntegrationTest'] |
| - if options.exclude_annotation_str: |
| - options.exclude_annotations = options.exclude_annotation_str.split(',') |
| + if args.exclude_annotation_str: |
| + args.exclude_annotations = args.exclude_annotation_str.split(',') |
| else: |
| - options.exclude_annotations = [] |
| - |
| - |
| -def AddInstrumentationTestOptions(option_parser): |
| - """Adds Instrumentation test options to |option_parser|.""" |
| - |
| - option_parser.usage = '%prog instrumentation [options]' |
| - option_parser.commands_dict = {} |
| - option_parser.example = ('%prog instrumentation ' |
| - '--test-apk=ChromeShellTest') |
| - |
| - AddJavaTestOptions(option_parser) |
| - AddCommonOptions(option_parser) |
| - AddDeviceOptions(option_parser) |
| - |
| - option_parser.add_option('-j', '--java-only', action='store_true', |
| - default=False, help='Run only the Java tests.') |
| - option_parser.add_option('-p', '--python-only', action='store_true', |
| - default=False, |
| - help='Run only the host-driven tests.') |
| - option_parser.add_option('--host-driven-root', |
| - help='Root of the host-driven tests.') |
| - option_parser.add_option('-w', '--wait_debugger', dest='wait_for_debugger', |
| - action='store_true', |
| - help='Wait for debugger.') |
| - option_parser.add_option( |
| - '--test-apk', dest='test_apk', |
| - help=('The name of the apk containing the tests ' |
| - '(without the .apk extension; e.g. "ContentShellTest").')) |
| - option_parser.add_option('--coverage-dir', |
| - help=('Directory in which to place all generated ' |
| - 'EMMA coverage files.')) |
| - option_parser.add_option('--device-flags', dest='device_flags', default='', |
| - help='The relative filepath to a file containing ' |
| - 'command-line flags to set on the device') |
| - option_parser.add_option('--isolate_file_path', |
| - '--isolate-file-path', |
| - dest='isolate_file_path', |
| - help='.isolate file path to override the default ' |
| - 'path') |
| - |
| - |
| -def ProcessInstrumentationOptions(options, error_func): |
| + args.exclude_annotations = [] |
| + |
| + |
| +def AddInstrumentationTestOptions(parser): |
| + """Adds Instrumentation test options to |parser|.""" |
| + |
| + parser.usage = '%(prog)s [options]' |
| + |
| + group = parser.add_argument_group('Instrumentation Test Options') |
| + AddJavaTestOptions(group) |
| + |
| + java_or_python_group = group.add_mutually_exclusive_group() |
| + java_or_python_group.add_argument( |
| + '-j', '--java-only', action='store_false', |
| + dest='run_python_tests', default=True, help='Run only the Java tests.') |
| + java_or_python_group.add_argument( |
| + '-p', '--python-only', action='store_false', |
| + dest='run_java_tests', default=True, |
| + help='Run only the host-driven tests.') |
| + |
| + group.add_argument('--host-driven-root', |
| + help='Root of the host-driven tests.') |
| + group.add_argument('-w', '--wait_debugger', dest='wait_for_debugger', |
| + action='store_true', |
| + help='Wait for debugger.') |
| + group.add_argument('--test-apk', dest='test_apk', required=True, |
| + help=('The name of the apk containing the tests ' |
| + '(without the .apk extension; ' |
| + 'e.g. "ContentShellTest").')) |
| + group.add_argument('--coverage-dir', |
| + help=('Directory in which to place all generated ' |
| + 'EMMA coverage files.')) |
| + group.add_argument('--device-flags', dest='device_flags', default='', |
| + help='The relative filepath to a file containing ' |
| + 'command-line flags to set on the device') |
| + group.add_argument('--isolate_file_path', |
| + '--isolate-file-path', |
| + dest='isolate_file_path', |
| + help='.isolate file path to override the default ' |
| + 'path') |
| + |
| + AddCommonOptions(parser) |
| + AddDeviceOptions(parser) |
| + |
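Editor's note: -j/--java-only and -p/--python-only now use store_false into each other's dest inside a mutually exclusive group, which is what lets the old "mutually exclusive" check disappear from ProcessInstrumentationOptions below. A small sketch of how that inversion behaves (flag names copied from the CL, everything else illustrative):

  import argparse

  parser = argparse.ArgumentParser()
  group = parser.add_mutually_exclusive_group()
  # --java-only turns the *python* tests off, and vice versa; both default on.
  group.add_argument('-j', '--java-only', action='store_false',
                     dest='run_python_tests', default=True)
  group.add_argument('-p', '--python-only', action='store_false',
                     dest='run_java_tests', default=True)

  args = parser.parse_args(['-j'])
  print(args.run_java_tests, args.run_python_tests)  # True False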
| + |
| +def ProcessInstrumentationOptions(args): |
| """Processes options/arguments and populate |options| with defaults. |
| Args: |
| - options: optparse.Options object. |
| - error_func: Function to call with the error message in case of an error. |
| + args: argparse.Namespace object. |
| Returns: |
| An InstrumentationOptions named tuple which contains all options relevant to |
| instrumentation tests. |
| """ |
| - ProcessJavaTestOptions(options) |
| - |
| - if options.java_only and options.python_only: |
| - error_func('Options java_only (-j) and python_only (-p) ' |
| - 'are mutually exclusive.') |
| - options.run_java_tests = True |
| - options.run_python_tests = True |
| - if options.java_only: |
| - options.run_python_tests = False |
| - elif options.python_only: |
| - options.run_java_tests = False |
| + ProcessJavaTestOptions(args) |
| - if not options.host_driven_root: |
| - options.run_python_tests = False |
| + if not args.host_driven_root: |
| + args.run_python_tests = False |
| - if not options.test_apk: |
| - error_func('--test-apk must be specified.') |
| - |
| - |
| - options.test_apk_path = os.path.join( |
| + args.test_apk_path = os.path.join( |
| constants.GetOutDirectory(), |
| constants.SDK_BUILD_APKS_DIR, |
| - '%s.apk' % options.test_apk) |
| - options.test_apk_jar_path = os.path.join( |
| + '%s.apk' % args.test_apk) |
| + args.test_apk_jar_path = os.path.join( |
| constants.GetOutDirectory(), |
| constants.SDK_BUILD_TEST_JAVALIB_DIR, |
| - '%s.jar' % options.test_apk) |
| - options.test_support_apk_path = '%sSupport%s' % ( |
| - os.path.splitext(options.test_apk_path)) |
| + '%s.jar' % args.test_apk) |
| + args.test_support_apk_path = '%sSupport%s' % ( |
| + os.path.splitext(args.test_apk_path)) |
| - options.test_runner = apk_helper.GetInstrumentationName(options.test_apk_path) |
| + args.test_runner = apk_helper.GetInstrumentationName(args.test_apk_path) |
| + # TODO(jbudorick): Get rid of InstrumentationOptions. |
| return instrumentation_test_options.InstrumentationOptions( |
| - options.tool, |
| - options.cleanup_test_files, |
| - options.annotations, |
| - options.exclude_annotations, |
| - options.test_filter, |
| - options.test_data, |
| - options.save_perf_json, |
| - options.screenshot_failures, |
| - options.wait_for_debugger, |
| - options.coverage_dir, |
| - options.test_apk, |
| - options.test_apk_path, |
| - options.test_apk_jar_path, |
| - options.test_runner, |
| - options.test_support_apk_path, |
| - options.device_flags, |
| - options.isolate_file_path |
| + args.tool, |
| + args.cleanup_test_files, |
| + args.annotations, |
| + args.exclude_annotations, |
| + args.test_filter, |
| + args.test_data, |
| + args.save_perf_json, |
| + args.screenshot_failures, |
| + args.wait_for_debugger, |
| + args.coverage_dir, |
| + args.test_apk, |
| + args.test_apk_path, |
| + args.test_apk_jar_path, |
| + args.test_runner, |
| + args.test_support_apk_path, |
| + args.device_flags, |
| + args.isolate_file_path |
| ) |
| -def AddUIAutomatorTestOptions(option_parser): |
| - """Adds UI Automator test options to |option_parser|.""" |
| - |
| - option_parser.usage = '%prog uiautomator [options]' |
| - option_parser.commands_dict = {} |
| - option_parser.example = ( |
| - '%prog uiautomator --test-jar=chrome_shell_uiautomator_tests' |
| - ' --package=chrome_shell') |
| - option_parser.add_option( |
| - '--package', |
| - help=('Package under test. Possible values: %s' % |
| - constants.PACKAGE_INFO.keys())) |
| - option_parser.add_option( |
| - '--test-jar', dest='test_jar', |
| +def AddUIAutomatorTestOptions(parser): |
| + """Adds UI Automator test options to |parser|.""" |
| + |
| + group = parser.add_argument_group('UIAutomator Test Options') |
| + AddJavaTestOptions(group) |
| + group.add_argument( |
| + '--package', required=True, choices=constants.PACKAGE_INFO.keys(), |
| + metavar='PACKAGE', help='Package under test.') |
| + group.add_argument( |
| + '--test-jar', dest='test_jar', required=True, |
| help=('The name of the dexed jar containing the tests (without the ' |
| '.dex.jar extension). Alternatively, this can be a full path ' |
| 'to the jar.')) |
| - AddJavaTestOptions(option_parser) |
| - AddCommonOptions(option_parser) |
| - AddDeviceOptions(option_parser) |
| + AddCommonOptions(parser) |
| + AddDeviceOptions(parser) |
| -def ProcessUIAutomatorOptions(options, error_func): |
| +def ProcessUIAutomatorOptions(args): |
| """Processes UIAutomator options/arguments. |
| Args: |
| - options: optparse.Options object. |
| - error_func: Function to call with the error message in case of an error. |
| + args: argparse.Namespace object. |
| Returns: |
| A UIAutomatorOptions named tuple which contains all options relevant to |
| uiautomator tests. |
| """ |
| - ProcessJavaTestOptions(options) |
| - |
| - if not options.package: |
| - error_func('--package is required.') |
| + ProcessJavaTestOptions(args) |
| - if options.package not in constants.PACKAGE_INFO: |
| - error_func('Invalid package.') |
| - |
| - if not options.test_jar: |
| - error_func('--test-jar must be specified.') |
| - |
| - if os.path.exists(options.test_jar): |
| + if os.path.exists(args.test_jar): |
| # The dexed JAR is fully qualified, assume the info JAR lives along side. |
| - options.uiautomator_jar = options.test_jar |
| + args.uiautomator_jar = args.test_jar |
| else: |
| - options.uiautomator_jar = os.path.join( |
| + args.uiautomator_jar = os.path.join( |
| constants.GetOutDirectory(), |
| constants.SDK_BUILD_JAVALIB_DIR, |
| - '%s.dex.jar' % options.test_jar) |
| - options.uiautomator_info_jar = ( |
| - options.uiautomator_jar[:options.uiautomator_jar.find('.dex.jar')] + |
| + '%s.dex.jar' % args.test_jar) |
| + args.uiautomator_info_jar = ( |
| + args.uiautomator_jar[:args.uiautomator_jar.find('.dex.jar')] + |
| '_java.jar') |
| return uiautomator_test_options.UIAutomatorOptions( |
| - options.tool, |
| - options.cleanup_test_files, |
| - options.annotations, |
| - options.exclude_annotations, |
| - options.test_filter, |
| - options.test_data, |
| - options.save_perf_json, |
| - options.screenshot_failures, |
| - options.uiautomator_jar, |
| - options.uiautomator_info_jar, |
| - options.package) |
| - |
| - |
| -def AddJUnitTestOptions(option_parser): |
| - """Adds junit test options to |option_parser|.""" |
| - option_parser.usage = '%prog junit -s [test suite name]' |
| - option_parser.commands_dict = {} |
| - |
| - option_parser.add_option( |
| - '-s', '--test-suite', dest='test_suite', |
| + args.tool, |
| + args.cleanup_test_files, |
| + args.annotations, |
| + args.exclude_annotations, |
| + args.test_filter, |
| + args.test_data, |
| + args.save_perf_json, |
| + args.screenshot_failures, |
| + args.uiautomator_jar, |
| + args.uiautomator_info_jar, |
| + args.package) |
| + |
| + |
| +def AddJUnitTestOptions(parser): |
| + """Adds junit test options to |parser|.""" |
| + |
| + group = parser.add_argument_group('JUnit Test Options') |
| + group.add_argument( |
| + '-s', '--test-suite', dest='test_suite', required=True, |
| help=('JUnit test suite to run.')) |
| - option_parser.add_option( |
| + group.add_argument( |
| '-f', '--test-filter', dest='test_filter', |
| help='Filters tests googletest-style.') |
| - option_parser.add_option( |
| + group.add_argument( |
| '--package-filter', dest='package_filter', |
| help='Filters tests by package.') |
| - option_parser.add_option( |
| + group.add_argument( |
| '--runner-filter', dest='runner_filter', |
| help='Filters tests by runner class. Must be fully qualified.') |
| - option_parser.add_option( |
| - '--sdk-version', dest='sdk_version', type="int", |
| + group.add_argument( |
| + '--sdk-version', dest='sdk_version', type=int, |
| help='The Android SDK version.') |
| - AddCommonOptions(option_parser) |
| - |
| + AddCommonOptions(parser) |
| -def ProcessJUnitTestOptions(options, error_func): |
| - """Processes all JUnit test options.""" |
| - if not options.test_suite: |
| - error_func('No test suite specified.') |
| - return options |
| +def AddMonkeyTestOptions(parser): |
| + """Adds monkey test options to |parser|.""" |
| -def AddMonkeyTestOptions(option_parser): |
| - """Adds monkey test options to |option_parser|.""" |
| - |
| - option_parser.usage = '%prog monkey [options]' |
| - option_parser.commands_dict = {} |
| - option_parser.example = ( |
| - '%prog monkey --package=chrome_shell') |
| - |
| - option_parser.add_option( |
| - '--package', |
| - help=('Package under test. Possible values: %s' % |
| - constants.PACKAGE_INFO.keys())) |
| - option_parser.add_option( |
| - '--event-count', default=10000, type='int', |
| - help='Number of events to generate [default: %default].') |
| - option_parser.add_option( |
| + group = parser.add_argument_group('Monkey Test Options') |
| + group.add_argument( |
| + '--package', required=True, choices=constants.PACKAGE_INFO.keys(), |
| + metavar='PACKAGE', help='Package under test.') |
| + group.add_argument( |
| + '--event-count', default=10000, type=int, |
| + help='Number of events to generate (default: %(default)s).') |
| + group.add_argument( |
| '--category', default='', |
| help='A list of allowed categories.') |
| - option_parser.add_option( |
| - '--throttle', default=100, type='int', |
| - help='Delay between events (ms) [default: %default]. ') |
| - option_parser.add_option( |
| - '--seed', type='int', |
| + group.add_argument( |
| + '--throttle', default=100, type=int, |
| + help='Delay between events (ms) (default: %(default)s). ') |
| + group.add_argument( |
| + '--seed', type=int, |
| help=('Seed value for pseudo-random generator. Same seed value generates ' |
| 'the same sequence of events. Seed is randomized by default.')) |
| - option_parser.add_option( |
| + group.add_argument( |
| '--extra-args', default='', |
| - help=('String of other args to pass to the command verbatim ' |
| - '[default: "%default"].')) |
| + help=('String of other args to pass to the command verbatim.')) |
| - AddCommonOptions(option_parser) |
| - AddDeviceOptions(option_parser) |
| + AddCommonOptions(parser) |
| + AddDeviceOptions(parser) |
| -def ProcessMonkeyTestOptions(options, error_func): |
| +def ProcessMonkeyTestOptions(args): |
| """Processes all monkey test options. |
| Args: |
| - options: optparse.Options object. |
| - error_func: Function to call with the error message in case of an error. |
| + args: argparse.Namespace object. |
| Returns: |
| A MonkeyOptions named tuple which contains all options relevant to |
| monkey tests. |
| """ |
| - if not options.package: |
| - error_func('--package is required.') |
| - |
| - if options.package not in constants.PACKAGE_INFO: |
| - error_func('Invalid package.') |
| - |
| - category = options.category |
| + # TODO(jbudorick): Handle this directly in argparse with nargs='+' |
| + category = args.category |
| if category: |
| - category = options.category.split(',') |
| + category = args.category.split(',') |
| + # TODO(jbudorick): Get rid of MonkeyOptions. |
| return monkey_test_options.MonkeyOptions( |
| - options.verbose_count, |
| - options.package, |
| - options.event_count, |
| + args.verbose_count, |
| + args.package, |
| + args.event_count, |
| category, |
| - options.throttle, |
| - options.seed, |
| - options.extra_args) |
| - |
| - |
| -def AddPerfTestOptions(option_parser): |
| - """Adds perf test options to |option_parser|.""" |
| - |
| - option_parser.usage = '%prog perf [options]' |
| - option_parser.commands_dict = {} |
| - option_parser.example = ('%prog perf ' |
| - '[--single-step -- command args] or ' |
| - '[--steps perf_steps.json] or ' |
| - '[--print-step step]') |
| - |
| - option_parser.add_option( |
| - '--single-step', |
| - action='store_true', |
| + args.throttle, |
| + args.seed, |
| + args.extra_args) |
| + |
| + |
| +def AddPerfTestOptions(parser): |
| + """Adds perf test options to |parser|.""" |
| + |
| + group = parser.add_argument_group('Perf Test Options') |
| + |
| + class SingleStepAction(argparse.Action): |
| + def __call__(self, parser, namespace, values, option_string=None): |
| + if values and not namespace.single_step: |
| + parser.error('single step command provided, ' |
| + 'but --single-step not specified.') |
| + elif namespace.single_step and not values: |
| + parser.error('--single-step specified, ' |
| + 'but no single step command provided.') |
| + setattr(namespace, self.dest, values) |
| + |
| + step_group = group.add_mutually_exclusive_group(required=True) |
| + # TODO(jbudorick): Revise --single-step to use argparse.REMAINDER. |
| + # This requires removing "--" from client calls. |
| + step_group.add_argument( |
| + '--single-step', action='store_true', |
| help='Execute the given command with retries, but only print the result ' |
| 'for the "most successful" round.') |
| - option_parser.add_option( |
| + step_group.add_argument( |
| '--steps', |
| help='JSON file containing the list of commands to run.') |
| - option_parser.add_option( |
| - '--flaky-steps', |
| - help=('A JSON file containing steps that are flaky ' |
| - 'and will have its exit code ignored.')) |
| - option_parser.add_option( |
| + step_group.add_argument( |
| + '--print-step', |
| + help='The name of a previously executed perf step to print.') |
| + |
| + group.add_argument( |
| '--output-json-list', |
| help='Write a simple list of names from --steps into the given file.') |
| - option_parser.add_option( |
| + group.add_argument( |
| '--collect-chartjson-data', |
| action='store_true', |
| help='Cache the chartjson output from each step for later use.') |
| - option_parser.add_option( |
| + group.add_argument( |
| '--output-chartjson-data', |
| default='', |
| help='Write out chartjson into the given file.') |
| - option_parser.add_option( |
| - '--print-step', |
| - help='The name of a previously executed perf step to print.') |
| - option_parser.add_option( |
| + group.add_argument( |
| + '--flaky-steps', |
| + help=('A JSON file containing steps that are flaky ' |
| + 'and will have its exit code ignored.')) |
| + group.add_argument( |
| '--no-timeout', action='store_true', |
| help=('Do not impose a timeout. Each perf step is responsible for ' |
| 'implementing the timeout logic.')) |
| - option_parser.add_option( |
| + group.add_argument( |
| '-f', '--test-filter', |
| help=('Test filter (will match against the names listed in --steps).')) |
| - option_parser.add_option( |
| - '--dry-run', |
| - action='store_true', |
| + group.add_argument( |
| + '--dry-run', action='store_true', |
| help='Just print the steps without executing.') |
| - AddCommonOptions(option_parser) |
| - AddDeviceOptions(option_parser) |
| + group.add_argument('single_step_command', nargs='*', action=SingleStepAction, |
| + help='If --single-step is specified, the command to run.') |
| + AddCommonOptions(parser) |
| + AddDeviceOptions(parser) |
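Editor's note: SingleStepAction above is a custom argparse.Action; the trailing positional swallows whatever follows the perf subcommand, and the action's __call__ cross-checks it against --single-step at parse time. A stripped-down sketch of the same idea (the example command is illustrative):

  import argparse

  class SingleStepAction(argparse.Action):
    def __call__(self, parser, namespace, values, option_string=None):
      # Validate the positional against the flag parsed before it.
      if values and not namespace.single_step:
        parser.error('single step command provided, '
                     'but --single-step not specified.')
      elif namespace.single_step and not values:
        parser.error('--single-step specified, '
                     'but no single step command provided.')
      setattr(namespace, self.dest, values)

  parser = argparse.ArgumentParser()
  parser.add_argument('--single-step', action='store_true')
  parser.add_argument('single_step_command', nargs='*', action=SingleStepAction)

  args = parser.parse_args(['--single-step', 'adb', 'shell', 'foo'])
  print(args.single_step_command)  # ['adb', 'shell', 'foo']
  # Supplying a command without --single-step (or the reverse) hits
  # parser.error and exits with a usage message.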
| -def ProcessPerfTestOptions(options, args, error_func): |
| +def ProcessPerfTestOptions(args): |
| """Processes all perf test options. |
| Args: |
| - options: optparse.Options object. |
| - error_func: Function to call with the error message in case of an error. |
| + args: argparse.Namespace object. |
| Returns: |
| A PerfOptions named tuple which contains all options relevant to |
| perf tests. |
| """ |
| - # Only one of steps, print_step or single_step must be provided. |
| - count = len(filter(None, |
| - [options.steps, options.print_step, options.single_step])) |
| - if count != 1: |
| - error_func('Please specify one of: --steps, --print-step, --single-step.') |
| - single_step = None |
| - if options.single_step: |
| - single_step = ' '.join(args[2:]) |
| + # TODO(jbudorick): Move single_step handling down into the perf tests. |
| + if args.single_step: |
| + args.single_step = ' '.join(args.single_step_command) |
| + # TODO(jbudorick): Get rid of PerfOptions. |
| return perf_test_options.PerfOptions( |
| - options.steps, options.flaky_steps, options.output_json_list, |
| - options.print_step, options.no_timeout, options.test_filter, |
| - options.dry_run, single_step, options.collect_chartjson_data, |
| - options.output_chartjson_data) |
| - |
| + args.steps, args.flaky_steps, args.output_json_list, |
| + args.print_step, args.no_timeout, args.test_filter, |
| + args.dry_run, args.single_step, args.collect_chartjson_data, |
| + args.output_chartjson_data) |
| -def AddPythonTestOptions(option_parser): |
| - option_parser.add_option('-s', '--suite', dest='suite_name', |
| - help=('Name of the test suite to run' |
| - '(use -s help to list them).')) |
| - AddCommonOptions(option_parser) |
| +def AddPythonTestOptions(parser): |
| + group = parser.add_argument_group('Python Test Options') |
| + group.add_argument( |
| + '-s', '--suite', dest='suite_name', metavar='SUITE_NAME', |
| + choices=constants.PYTHON_UNIT_TEST_SUITES.keys(), |
| + help='Name of the test suite to run.') |
| + AddCommonOptions(parser) |
| -def ProcessPythonTestOptions(options, error_func): |
| - if options.suite_name not in constants.PYTHON_UNIT_TEST_SUITES: |
| - available = ('Available test suites: [%s]' % |
| - ', '.join(constants.PYTHON_UNIT_TEST_SUITES.iterkeys())) |
| - if options.suite_name == 'help': |
| - print available |
| - else: |
| - error_func('"%s" is not a valid suite. %s' % |
| - (options.suite_name, available)) |
| - |
| -def _RunGTests(options, devices): |
| +def _RunGTests(args, devices): |
| """Subcommand of RunTestsCommands which runs gtests.""" |
| - ProcessGTestOptions(options) |
| - |
| exit_code = 0 |
| - for suite_name in options.suite_name: |
| - # TODO(gkanwar): Move this into ProcessGTestOptions once we require -s for |
| - # the gtest command. |
| + for suite_name in args.suite_name: |
| + # TODO(jbudorick): Either deprecate multi-suite or move its handling down |
| + # into the gtest code. |
| gtest_options = gtest_test_options.GTestOptions( |
| - options.tool, |
| - options.cleanup_test_files, |
| - options.test_filter, |
| - options.run_disabled, |
| - options.test_arguments, |
| - options.timeout, |
| - options.isolate_file_path, |
| + args.tool, |
| + args.cleanup_test_files, |
| + args.test_filter, |
| + args.run_disabled, |
| + args.test_arguments, |
| + args.timeout, |
| + args.isolate_file_path, |
| suite_name) |
| runner_factory, tests = gtest_setup.Setup(gtest_options, devices) |
| results, test_exit_code = test_dispatcher.RunTests( |
| tests, runner_factory, devices, shard=True, test_timeout=None, |
| - num_retries=options.num_retries) |
| + num_retries=args.num_retries) |
| if test_exit_code and exit_code != constants.ERROR_EXIT_CODE: |
| exit_code = test_exit_code |
| @@ -648,10 +571,10 @@ def _RunGTests(options, devices): |
| results=results, |
| test_type='Unit test', |
| test_package=suite_name, |
| - flakiness_server=options.flakiness_dashboard_server) |
| + flakiness_server=args.flakiness_dashboard_server) |
| - if options.json_results_file: |
| - json_results.GenerateJsonResultsFile(results, options.json_results_file) |
| + if args.json_results_file: |
| + json_results.GenerateJsonResultsFile(results, args.json_results_file) |
| if os.path.isdir(constants.ISOLATE_DEPS_DIR): |
| shutil.rmtree(constants.ISOLATE_DEPS_DIR) |
| @@ -659,55 +582,57 @@ def _RunGTests(options, devices): |
| return exit_code |
| -def _RunLinkerTests(options, devices): |
| +def _RunLinkerTests(args, devices): |
| """Subcommand of RunTestsCommands which runs linker tests.""" |
| - runner_factory, tests = linker_setup.Setup(options, devices) |
| + runner_factory, tests = linker_setup.Setup(args, devices) |
| results, exit_code = test_dispatcher.RunTests( |
| tests, runner_factory, devices, shard=True, test_timeout=60, |
| - num_retries=options.num_retries) |
| + num_retries=args.num_retries) |
| report_results.LogFull( |
| results=results, |
| test_type='Linker test', |
| test_package='ChromiumLinkerTest') |
| - if options.json_results_file: |
| - json_results.GenerateJsonResultsFile(results, options.json_results_file) |
| + if args.json_results_file: |
| + json_results.GenerateJsonResultsFile(results, args.json_results_file) |
| return exit_code |
| -def _RunInstrumentationTests(options, error_func, devices): |
| +def _RunInstrumentationTests(args, devices): |
| """Subcommand of RunTestsCommands which runs instrumentation tests.""" |
| - instrumentation_options = ProcessInstrumentationOptions(options, error_func) |
| + logging.info('_RunInstrumentationTests(%s, %s)' % (str(args), str(devices))) |
| - if len(devices) > 1 and options.wait_for_debugger: |
| + instrumentation_options = ProcessInstrumentationOptions(args) |
| + |
| + if len(devices) > 1 and args.wait_for_debugger: |
| logging.warning('Debugger can not be sharded, using first available device') |
| devices = devices[:1] |
| results = base_test_result.TestRunResults() |
| exit_code = 0 |
| - if options.run_java_tests: |
| + if args.run_java_tests: |
| runner_factory, tests = instrumentation_setup.Setup( |
| instrumentation_options, devices) |
| test_results, exit_code = test_dispatcher.RunTests( |
| tests, runner_factory, devices, shard=True, test_timeout=None, |
| - num_retries=options.num_retries) |
| + num_retries=args.num_retries) |
| results.AddTestRunResults(test_results) |
| - if options.run_python_tests: |
| + if args.run_python_tests: |
| runner_factory, tests = host_driven_setup.InstrumentationSetup( |
| - options.host_driven_root, options.official_build, |
| + args.host_driven_root, args.official_build, |
| instrumentation_options) |
| if tests: |
| test_results, test_exit_code = test_dispatcher.RunTests( |
| tests, runner_factory, devices, shard=True, test_timeout=None, |
| - num_retries=options.num_retries) |
| + num_retries=args.num_retries) |
| results.AddTestRunResults(test_results) |
| @@ -715,79 +640,77 @@ def _RunInstrumentationTests(options, error_func, devices): |
| if test_exit_code and exit_code != constants.ERROR_EXIT_CODE: |
| exit_code = test_exit_code |
| - if options.device_flags: |
| - options.device_flags = os.path.join(constants.DIR_SOURCE_ROOT, |
| - options.device_flags) |
| + if args.device_flags: |
| + args.device_flags = os.path.join(constants.DIR_SOURCE_ROOT, |
| + args.device_flags) |
| report_results.LogFull( |
| results=results, |
| test_type='Instrumentation', |
| - test_package=os.path.basename(options.test_apk), |
| - annotation=options.annotations, |
| - flakiness_server=options.flakiness_dashboard_server) |
| + test_package=os.path.basename(args.test_apk), |
| + annotation=args.annotations, |
| + flakiness_server=args.flakiness_dashboard_server) |
| - if options.json_results_file: |
| - json_results.GenerateJsonResultsFile(results, options.json_results_file) |
| + if args.json_results_file: |
| + json_results.GenerateJsonResultsFile(results, args.json_results_file) |
| return exit_code |
| -def _RunUIAutomatorTests(options, error_func, devices): |
| +def _RunUIAutomatorTests(args, devices): |
| """Subcommand of RunTestsCommands which runs uiautomator tests.""" |
| - uiautomator_options = ProcessUIAutomatorOptions(options, error_func) |
| + uiautomator_options = ProcessUIAutomatorOptions(args) |
| runner_factory, tests = uiautomator_setup.Setup(uiautomator_options) |
| results, exit_code = test_dispatcher.RunTests( |
| tests, runner_factory, devices, shard=True, test_timeout=None, |
| - num_retries=options.num_retries) |
| + num_retries=args.num_retries) |
| report_results.LogFull( |
| results=results, |
| test_type='UIAutomator', |
| - test_package=os.path.basename(options.test_jar), |
| - annotation=options.annotations, |
| - flakiness_server=options.flakiness_dashboard_server) |
| + test_package=os.path.basename(args.test_jar), |
| + annotation=args.annotations, |
| + flakiness_server=args.flakiness_dashboard_server) |
| - if options.json_results_file: |
| - json_results.GenerateJsonResultsFile(results, options.json_results_file) |
| + if args.json_results_file: |
| + json_results.GenerateJsonResultsFile(results, args.json_results_file) |
| return exit_code |
| -def _RunJUnitTests(options, error_func): |
| +def _RunJUnitTests(args): |
| """Subcommand of RunTestsCommand which runs junit tests.""" |
| - junit_options = ProcessJUnitTestOptions(options, error_func) |
| - runner_factory, tests = junit_setup.Setup(junit_options) |
| + runner_factory, tests = junit_setup.Setup(args) |
| _, exit_code = junit_dispatcher.RunTests(tests, runner_factory) |
| - |
| return exit_code |
| -def _RunMonkeyTests(options, error_func, devices): |
| +def _RunMonkeyTests(args, devices): |
| """Subcommand of RunTestsCommands which runs monkey tests.""" |
| - monkey_options = ProcessMonkeyTestOptions(options, error_func) |
| + monkey_options = ProcessMonkeyTestOptions(args) |
| runner_factory, tests = monkey_setup.Setup(monkey_options) |
| results, exit_code = test_dispatcher.RunTests( |
| tests, runner_factory, devices, shard=False, test_timeout=None, |
| - num_retries=options.num_retries) |
| + num_retries=args.num_retries) |
| report_results.LogFull( |
| results=results, |
| test_type='Monkey', |
| test_package='Monkey') |
| - if options.json_results_file: |
| - json_results.GenerateJsonResultsFile(results, options.json_results_file) |
| + if args.json_results_file: |
| + json_results.GenerateJsonResultsFile(results, args.json_results_file) |
| return exit_code |
| -def _RunPerfTests(options, args, error_func): |
| +def _RunPerfTests(args): |
| """Subcommand of RunTestsCommands which runs perf tests.""" |
| - perf_options = ProcessPerfTestOptions(options, args, error_func) |
| + perf_options = ProcessPerfTestOptions(args) |
| # Just save a simple json with a list of test names. |
| if perf_options.output_json_list: |
| @@ -810,15 +733,15 @@ def _RunPerfTests(options, args, error_func): |
| # which increases throughput but have no affinity. |
| results, _ = test_dispatcher.RunTests( |
| tests, runner_factory, devices, shard=False, test_timeout=None, |
| - num_retries=options.num_retries) |
| + num_retries=args.num_retries) |
| report_results.LogFull( |
| results=results, |
| test_type='Perf', |
| test_package='Perf') |
| - if options.json_results_file: |
| - json_results.GenerateJsonResultsFile(results, options.json_results_file) |
| + if args.json_results_file: |
| + json_results.GenerateJsonResultsFile(results, args.json_results_file) |
| if perf_options.single_step: |
| return perf_test_runner.PrintTestOutput('single_step') |
| @@ -830,11 +753,9 @@ def _RunPerfTests(options, args, error_func): |
| return 0 |
| -def _RunPythonTests(options, error_func): |
| +def _RunPythonTests(args): |
| """Subcommand of RunTestsCommand which runs python unit tests.""" |
| - ProcessPythonTestOptions(options, error_func) |
| - |
| - suite_vars = constants.PYTHON_UNIT_TEST_SUITES[options.suite_name] |
| + suite_vars = constants.PYTHON_UNIT_TEST_SUITES[args.suite_name] |
| suite_path = suite_vars['path'] |
| suite_test_modules = suite_vars['test_modules'] |
| @@ -843,7 +764,7 @@ def _RunPythonTests(options, error_func): |
| suite = unittest.TestSuite() |
| suite.addTests(unittest.defaultTestLoader.loadTestsFromName(m) |
| for m in suite_test_modules) |
| - runner = unittest.TextTestRunner(verbosity=1+options.verbose_count) |
| + runner = unittest.TextTestRunner(verbosity=1+args.verbose_count) |
| return 0 if runner.run(suite).wasSuccessful() else 1 |
| finally: |
| sys.path = sys.path[1:] |
| @@ -872,15 +793,12 @@ def _GetAttachedDevices(test_device=None): |
| return sorted(attached_devices) |
| -def RunTestsCommand(command, options, args, option_parser): |
| +def RunTestsCommand(args, parser): |
| """Checks test type and dispatches to the appropriate function. |
| Args: |
| - command: String indicating the command that was received to trigger |
| - this function. |
| - options: optparse options dictionary. |
| - args: List of extra args from optparse. |
| - option_parser: optparse.OptionParser object. |
| + args: argparse.Namespace object. |
| + parser: argparse.ArgumentParser object. |
| Returns: |
| Integer indicated exit code. |
| @@ -889,47 +807,38 @@ def RunTestsCommand(command, options, args, option_parser): |
| Exception: Unknown command name passed in, or an exception from an |
| individual test runner. |
| """ |
| + command = args.command |
| - # Check for extra arguments |
| - if len(args) > 2 and command != 'perf': |
| - option_parser.error('Unrecognized arguments: %s' % (' '.join(args[2:]))) |
| - return constants.ERROR_EXIT_CODE |
| - if command == 'perf': |
| - if ((options.single_step and len(args) <= 2) or |
| - (not options.single_step and len(args) > 2)): |
| - option_parser.error('Unrecognized arguments: %s' % (' '.join(args))) |
| - return constants.ERROR_EXIT_CODE |
| - |
| - ProcessCommonOptions(options, option_parser.error) |
| + ProcessCommonOptions(args) |
| - if options.enable_platform_mode: |
| - return RunTestsInPlatformMode(command, options, option_parser) |
| + if args.enable_platform_mode: |
| + return RunTestsInPlatformMode(args, parser) |
| if command in constants.LOCAL_MACHINE_TESTS: |
| devices = [] |
| else: |
| - devices = _GetAttachedDevices(options.test_device) |
| + devices = _GetAttachedDevices(args.test_device) |
| forwarder.Forwarder.RemoveHostLog() |
| if not ports.ResetTestServerPortAllocation(): |
| raise Exception('Failed to reset test server port.') |
| if command == 'gtest': |
| - return _RunGTests(options, devices) |
| + return _RunGTests(args, devices) |
| elif command == 'linker': |
| - return _RunLinkerTests(options, devices) |
| + return _RunLinkerTests(args, devices) |
| elif command == 'instrumentation': |
| - return _RunInstrumentationTests(options, option_parser.error, devices) |
| + return _RunInstrumentationTests(args, devices) |
| elif command == 'uiautomator': |
| - return _RunUIAutomatorTests(options, option_parser.error, devices) |
| + return _RunUIAutomatorTests(args, devices) |
| elif command == 'junit': |
| - return _RunJUnitTests(options, option_parser.error) |
| + return _RunJUnitTests(args) |
| elif command == 'monkey': |
| - return _RunMonkeyTests(options, option_parser.error, devices) |
| + return _RunMonkeyTests(args, devices) |
| elif command == 'perf': |
| - return _RunPerfTests(options, args, option_parser.error) |
| + return _RunPerfTests(args) |
| elif command == 'python': |
| - return _RunPythonTests(options, option_parser.error) |
| + return _RunPythonTests(args) |
| else: |
| raise Exception('Unknown test type.') |
| @@ -940,97 +849,60 @@ _SUPPORTED_IN_PLATFORM_MODE = [ |
| ] |
| -def RunTestsInPlatformMode(command, options, option_parser): |
| +def RunTestsInPlatformMode(args, parser): |
| - if command not in _SUPPORTED_IN_PLATFORM_MODE: |
| - option_parser.error('%s is not yet supported in platform mode' % command) |
| + if args.command not in _SUPPORTED_IN_PLATFORM_MODE: |
| + parser.error('%s is not yet supported in platform mode' % args.command) |
| - with environment_factory.CreateEnvironment( |
| - command, options, option_parser.error) as env: |
| - with test_instance_factory.CreateTestInstance( |
| - command, options, option_parser.error) as test: |
| + with environment_factory.CreateEnvironment(args, parser.error) as env: |
| + with test_instance_factory.CreateTestInstance(args, parser.error) as test: |
| with test_run_factory.CreateTestRun( |
| - options, env, test, option_parser.error) as test_run: |
| + args, env, test, parser.error) as test_run: |
| results = test_run.RunTests() |
| report_results.LogFull( |
| results=results, |
| test_type=test.TestType(), |
| test_package=test_run.TestPackage(), |
| - annotation=options.annotations, |
| - flakiness_server=options.flakiness_dashboard_server) |
| + annotation=args.annotations, |
| + flakiness_server=args.flakiness_dashboard_server) |
| - if options.json_results_file: |
| + if args.json_results_file: |
| json_results.GenerateJsonResultsFile( |
| - results, options.json_results_file) |
| + results, args.json_results_file) |
| return results |
| -def HelpCommand(command, _options, args, option_parser): |
| - """Display help for a certain command, or overall help. |
| - |
| - Args: |
| - command: String indicating the command that was received to trigger |
| - this function. |
| - options: optparse options dictionary. unused. |
| - args: List of extra args from optparse. |
| - option_parser: optparse.OptionParser object. |
| - |
| - Returns: |
| - Integer indicated exit code. |
| - """ |
| - # If we don't have any args, display overall help |
| - if len(args) < 3: |
| - option_parser.print_help() |
| - return 0 |
| - # If we have too many args, print an error |
| - if len(args) > 3: |
| - option_parser.error('Unrecognized arguments: %s' % (' '.join(args[3:]))) |
| - return constants.ERROR_EXIT_CODE |
| - |
| - command = args[2] |
| - |
| - if command not in VALID_COMMANDS: |
| - option_parser.error('Unrecognized command.') |
| - |
| - # Treat the help command as a special case. We don't care about showing a |
| - # specific help page for itself. |
| - if command == 'help': |
| - option_parser.print_help() |
| - return 0 |
| - |
| - VALID_COMMANDS[command].add_options_func(option_parser) |
| - option_parser.usage = '%prog ' + command + ' [options]' |
| - option_parser.commands_dict = {} |
| - option_parser.print_help() |
| - |
| - return 0 |
| - |
| - |
| -# Define a named tuple for the values in the VALID_COMMANDS dictionary so the |
| -# syntax is a bit prettier. The tuple is two functions: (add options, run |
| -# command). |
| -CommandFunctionTuple = collections.namedtuple( |
| - 'CommandFunctionTuple', ['add_options_func', 'run_command_func']) |
| +CommandConfigTuple = collections.namedtuple( |
| + 'CommandConfigTuple', |
| + ['add_options_func', 'help_txt']) |
| VALID_COMMANDS = { |
| - 'gtest': CommandFunctionTuple(AddGTestOptions, RunTestsCommand), |
| - 'instrumentation': CommandFunctionTuple( |
| - AddInstrumentationTestOptions, RunTestsCommand), |
| - 'uiautomator': CommandFunctionTuple( |
| - AddUIAutomatorTestOptions, RunTestsCommand), |
| - 'junit': CommandFunctionTuple( |
| - AddJUnitTestOptions, RunTestsCommand), |
| - 'monkey': CommandFunctionTuple( |
| - AddMonkeyTestOptions, RunTestsCommand), |
| - 'perf': CommandFunctionTuple( |
| - AddPerfTestOptions, RunTestsCommand), |
| - 'python': CommandFunctionTuple( |
| - AddPythonTestOptions, RunTestsCommand), |
| - 'linker': CommandFunctionTuple( |
| - AddLinkerTestOptions, RunTestsCommand), |
| - 'help': CommandFunctionTuple(lambda option_parser: None, HelpCommand) |
| - } |
| + 'gtest': CommandConfigTuple( |
| + AddGTestOptions, |
| + 'googletest-based C++ tests'), |
| + 'instrumentation': CommandConfigTuple( |
| + AddInstrumentationTestOptions, |
| + 'InstrumentationTestCase-based Java tests'), |
| + 'uiautomator': CommandConfigTuple( |
| + AddUIAutomatorTestOptions, |
| + "Tests that run via Android's uiautomator command"), |
| + 'junit': CommandConfigTuple( |
| + AddJUnitTestOptions, |
| + 'JUnit4-based Java tests'), |
| + 'monkey': CommandConfigTuple( |
| + AddMonkeyTestOptions, |
| + "Tests based on Android's monkey"), |
| + 'perf': CommandConfigTuple( |
| + AddPerfTestOptions, |
| + 'Performance tests'), |
| + 'python': CommandConfigTuple( |
| + AddPythonTestOptions, |
| + 'Python tests based on unittest.TestCase'), |
| + 'linker': CommandConfigTuple( |
| + AddLinkerTestOptions, |
| + 'Linker tests'), |
| +} |
| def DumpThreadStacks(_signal, _frame): |
| @@ -1040,9 +912,21 @@ def DumpThreadStacks(_signal, _frame): |
| def main(): |
| signal.signal(signal.SIGUSR1, DumpThreadStacks) |
| - option_parser = command_option_parser.CommandOptionParser( |
| - commands_dict=VALID_COMMANDS) |
| - return command_option_parser.ParseAndExecute(option_parser) |
| + |
| + parser = argparse.ArgumentParser() |
| + command_parsers = parser.add_subparsers(title='test types', |
| + dest='command') |
| + |
| + for test_type, config in sorted(VALID_COMMANDS.iteritems(), |
| + key=lambda x: x[0]): |
| + subparser = command_parsers.add_parser( |
| + test_type, usage='%(prog)s [options]', help=config.help_txt) |
| + config.add_options_func(subparser) |
| + |
| + args = parser.parse_args() |
| + RunTestsCommand(args, parser) |
| + |
| + return 0 |
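Editor's note: main() now builds the command table with argparse sub-parsers instead of the custom command_option_parser, so each test type gets its own --help and the dispatch key lands in args.command. A self-contained sketch of that shape (the handler and command set below are placeholders for VALID_COMMANDS):

  import argparse

  def add_gtest_options(subparser):
    subparser.add_argument('-s', '--suite', dest='suite_name', nargs='+')

  COMMANDS = {'gtest': add_gtest_options}  # illustrative subset

  parser = argparse.ArgumentParser()
  subparsers = parser.add_subparsers(title='test types', dest='command')
  for name, add_options in sorted(COMMANDS.items()):
    add_options(subparsers.add_parser(name, help='%s tests' % name))

  args = parser.parse_args(['gtest', '-s', 'base_unittests'])
  print(args.command, args.suite_name)  # gtest ['base_unittests']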
| if __name__ == '__main__': |