| Index: build/android/test_runner.py
 | 
| diff --git a/build/android/test_runner.py b/build/android/test_runner.py
 | 
| index e5016e2e9c4214048b622df1bd5d8ce5f08094d4..f38b76e6af8a64d9a5da93c06b84cf2862f454a9 100755
 | 
| --- a/build/android/test_runner.py
 | 
| +++ b/build/android/test_runner.py
 | 
| @@ -6,9 +6,9 @@
 | 
|  
 | 
|  """Runs all types of tests from one unified interface."""
 | 
|  
 | 
| -import argparse
 | 
|  import collections
 | 
|  import logging
 | 
| +import optparse
 | 
|  import os
 | 
|  import shutil
 | 
|  import signal
 | 
| @@ -44,161 +44,189 @@ from pylib.results import report_results
 | 
|  from pylib.uiautomator import setup as uiautomator_setup
 | 
|  from pylib.uiautomator import test_options as uiautomator_test_options
 | 
|  from pylib.utils import apk_helper
 | 
| +from pylib.utils import command_option_parser
 | 
|  from pylib.utils import reraiser_thread
 | 
|  from pylib.utils import run_tests_helper
 | 
|  
 | 
|  
 | 
| -def AddCommonOptions(parser):
 | 
| -  """Adds all common options to |parser|."""
 | 
| -
 | 
| -  group = parser.add_argument_group('Common Options')
 | 
| +def AddCommonOptions(option_parser):
 | 
| +  """Adds all common options to |option_parser|."""
 | 
|  
 | 
| +  group = optparse.OptionGroup(option_parser, 'Common Options')
 | 
|    default_build_type = os.environ.get('BUILDTYPE', 'Debug')
 | 
| -
 | 
| -  debug_or_release_group = group.add_mutually_exclusive_group()
 | 
| -  debug_or_release_group.add_argument(
 | 
| -      '--debug', action='store_const', const='Debug', dest='build_type',
 | 
| -      default=default_build_type,
 | 
| -      help=('If set, run test suites under out/Debug. '
 | 
| -            'Default is env var BUILDTYPE or Debug.'))
 | 
| -  debug_or_release_group.add_argument(
 | 
| -      '--release', action='store_const', const='Release', dest='build_type',
 | 
| -      help=('If set, run test suites under out/Release. '
 | 
| -            'Default is env var BUILDTYPE or Debug.'))
 | 
| -
 | 
| -  group.add_argument('--build-directory', dest='build_directory',
 | 
| -                     help=('Path to the directory in which build files are'
 | 
| -                           ' located (should not include build type)'))
 | 
| -  group.add_argument('--output-directory', dest='output_directory',
 | 
| -                     help=('Path to the directory in which build files are'
 | 
| -                           ' located (must include build type). This will take'
 | 
| -                           ' precedence over --debug, --release and'
 | 
| -                           ' --build-directory'))
 | 
| -  group.add_argument('--num_retries', dest='num_retries', type=int, default=2,
 | 
| -                     help=('Number of retries for a test before '
 | 
| -                           'giving up (default: %(default)s).'))
 | 
| -  group.add_argument('-v',
 | 
| -                     '--verbose',
 | 
| -                     dest='verbose_count',
 | 
| -                     default=0,
 | 
| -                     action='count',
 | 
| -                     help='Verbose level (multiple times for more)')
 | 
| -  group.add_argument('--flakiness-dashboard-server',
 | 
| -                     dest='flakiness_dashboard_server',
 | 
| -                     help=('Address of the server that is hosting the '
 | 
| -                           'Chrome for Android flakiness dashboard.'))
 | 
| -  group.add_argument('--enable-platform-mode', action='store_true',
 | 
| -                     help=('Run the test scripts in platform mode, which '
 | 
| -                           'conceptually separates the test runner from the '
 | 
| -                           '"device" (local or remote, real or emulated) on '
 | 
| -                           'which the tests are running. [experimental]'))
 | 
| -  group.add_argument('-e', '--environment', default='local',
 | 
| -                     choices=constants.VALID_ENVIRONMENTS,
 | 
| -                     help='Test environment to run in (default: %(default)s).')
 | 
| -  group.add_argument('--adb-path',
 | 
| -                     help=('Specify the absolute path of the adb binary that '
 | 
| -                           'should be used.'))
 | 
| -  group.add_argument('--json-results-file', dest='json_results_file',
 | 
| -                     help='If set, will dump results in JSON form '
 | 
| -                          'to specified file.')
 | 
| -
 | 
| -
 | 
| -def ProcessCommonOptions(args):
 | 
| +  group.add_option('--debug', action='store_const', const='Debug',
 | 
| +                   dest='build_type', default=default_build_type,
 | 
| +                   help=('If set, run test suites under out/Debug. '
 | 
| +                         'Default is env var BUILDTYPE or Debug.'))
 | 
| +  group.add_option('--release', action='store_const',
 | 
| +                   const='Release', dest='build_type',
 | 
| +                   help=('If set, run test suites under out/Release.'
 | 
| +                         ' Default is env var BUILDTYPE or Debug.'))
 | 
| +  group.add_option('--build-directory', dest='build_directory',
 | 
| +                   help=('Path to the directory in which build files are'
 | 
| +                         ' located (should not include build type)'))
 | 
| +  group.add_option('--output-directory', dest='output_directory',
 | 
| +                   help=('Path to the directory in which build files are'
 | 
| +                         ' located (must include build type). This will take'
 | 
| +                         ' precedence over --debug, --release and'
 | 
| +                         ' --build-directory'))
 | 
| +  group.add_option('--num_retries', dest='num_retries', type='int',
 | 
| +                   default=2,
 | 
| +                   help=('Number of retries for a test before '
 | 
| +                         'giving up.'))
 | 
| +  group.add_option('-v',
 | 
| +                   '--verbose',
 | 
| +                   dest='verbose_count',
 | 
| +                   default=0,
 | 
| +                   action='count',
 | 
| +                   help='Verbose level (multiple times for more)')
 | 
| +  group.add_option('--flakiness-dashboard-server',
 | 
| +                   dest='flakiness_dashboard_server',
 | 
| +                   help=('Address of the server that is hosting the '
 | 
| +                         'Chrome for Android flakiness dashboard.'))
 | 
| +  group.add_option('--enable-platform-mode', action='store_true',
 | 
| +                   help=('Run the test scripts in platform mode, which '
 | 
| +                         'conceptually separates the test runner from the '
 | 
| +                         '"device" (local or remote, real or emulated) on '
 | 
| +                         'which the tests are running. [experimental]'))
 | 
| +  group.add_option('-e', '--environment', default='local',
 | 
| +                   help=('Test environment to run in. Must be one of: %s' %
 | 
| +                         ', '.join(constants.VALID_ENVIRONMENTS)))
 | 
| +  group.add_option('--adb-path',
 | 
| +                   help=('Specify the absolute path of the adb binary that '
 | 
| +                         'should be used.'))
 | 
| +  group.add_option('--json-results-file', dest='json_results_file',
 | 
| +                   help='If set, will dump results in JSON format '
 | 
| +                        'to the specified file.')
 | 
| +  option_parser.add_option_group(group)
 | 
| +
 | 
| +
 | 
| +def ProcessCommonOptions(options, error_func):
 | 
|    """Processes and handles all common options."""
 | 
| -  run_tests_helper.SetLogLevel(args.verbose_count)
 | 
| -  constants.SetBuildType(args.build_type)
 | 
| -  if args.build_directory:
 | 
| -    constants.SetBuildDirectory(args.build_directory)
 | 
| -  if args.output_directory:
 | 
| -    constants.SetOutputDirectort(args.output_directory)
 | 
| -  if args.adb_path:
 | 
| -    constants.SetAdbPath(args.adb_path)
 | 
| +  run_tests_helper.SetLogLevel(options.verbose_count)
 | 
| +  constants.SetBuildType(options.build_type)
 | 
| +  if options.build_directory:
 | 
| +    constants.SetBuildDirectory(options.build_directory)
 | 
| +  if options.output_directory:
 | 
| +    constants.SetOutputDirectort(options.output_directory)
 | 
| +  if options.adb_path:
 | 
| +    constants.SetAdbPath(options.adb_path)
 | 
|    # Some things such as Forwarder require ADB to be in the environment path.
 | 
|    adb_dir = os.path.dirname(constants.GetAdbPath())
 | 
|    if adb_dir and adb_dir not in os.environ['PATH'].split(os.pathsep):
 | 
|      os.environ['PATH'] = adb_dir + os.pathsep + os.environ['PATH']
 | 
| +  if options.environment not in constants.VALID_ENVIRONMENTS:
 | 
| +    error_func('--environment must be one of: %s' %
 | 
| +               ', '.join(constants.VALID_ENVIRONMENTS))
 | 
| +
 | 
| +
 | 
| +def AddDeviceOptions(option_parser):
 | 
| +  group = optparse.OptionGroup(option_parser, 'Device Options')
 | 
| +  group.add_option('-c', dest='cleanup_test_files',
 | 
| +                   help='Cleanup test files on the device after run',
 | 
| +                   action='store_true')
 | 
| +  group.add_option('--tool',
 | 
| +                   dest='tool',
 | 
| +                   help=('Run the test under a tool '
 | 
| +                         '(use --tool help to list them)'))
 | 
| +  group.add_option('-d', '--device', dest='test_device',
 | 
| +                   help=('Target device for the test suite '
 | 
| +                         'to run on.'))
 | 
| +  option_parser.add_option_group(group)
 | 
| +
 | 
| +
 | 
| +def AddGTestOptions(option_parser):
 | 
| +  """Adds gtest options to |option_parser|."""
 | 
| +
 | 
| +  option_parser.usage = '%prog gtest [options]'
 | 
| +  option_parser.commands_dict = {}
 | 
| +  option_parser.example = '%prog gtest -s base_unittests'
 | 
| +
 | 
| +  # TODO(gkanwar): Make this option required
 | 
| +  option_parser.add_option('-s', '--suite', dest='suite_name',
 | 
| +                           help=('Executable name of the test suite to run '
 | 
| +                                 '(use -s help to list them).'))
 | 
| +  option_parser.add_option('-f', '--gtest_filter', '--gtest-filter',
 | 
| +                           dest='test_filter',
 | 
| +                           help='googletest-style filter string.')
 | 
| +  option_parser.add_option('--gtest_also_run_disabled_tests',
 | 
| +                           '--gtest-also-run-disabled-tests',
 | 
| +                           dest='run_disabled', action='store_true',
 | 
| +                           help='Also run disabled tests if applicable.')
 | 
| +  option_parser.add_option('-a', '--test-arguments', dest='test_arguments',
 | 
| +                           default='',
 | 
| +                           help='Additional arguments to pass to the test.')
 | 
| +  option_parser.add_option('-t', dest='timeout',
 | 
| +                           help='Timeout to wait for each test',
 | 
| +                           type='int',
 | 
| +                           default=60)
 | 
| +  option_parser.add_option('--isolate_file_path',
 | 
| +                           '--isolate-file-path',
 | 
| +                           dest='isolate_file_path',
 | 
| +                           help='.isolate file path to override the default '
 | 
| +                                'path')
 | 
| +
 | 
| +  AddCommonOptions(option_parser)
 | 
| +  AddDeviceOptions(option_parser)
 | 
| +
 | 
| +
 | 
| +def AddLinkerTestOptions(option_parser):
 | 
| +  option_parser.usage = '%prog linker'
 | 
| +  option_parser.commands_dict = {}
 | 
| +  option_parser.example = '%prog linker'
 | 
| +
 | 
| +  option_parser.add_option('-f', '--gtest-filter', dest='test_filter',
 | 
| +                           help='googletest-style filter string.')
 | 
| +  AddCommonOptions(option_parser)
 | 
| +  AddDeviceOptions(option_parser)
 | 
| +
 | 
| +
 | 
| +def ProcessGTestOptions(options):
 | 
| +  """Intercept test suite help to list test suites.
 | 
| +
 | 
| +  Args:
 | 
| +    options: Command line options.
 | 
| +  """
 | 
| +  if options.suite_name == 'help':
 | 
| +    print 'Available test suites are:'
 | 
| +    for test_suite in (gtest_config.STABLE_TEST_SUITES +
 | 
| +                       gtest_config.EXPERIMENTAL_TEST_SUITES):
 | 
| +      print test_suite
 | 
| +    sys.exit(0)
 | 
| +
 | 
| +  # Convert to a list, assuming all test suites if nothing was specified.
 | 
| +  # TODO(gkanwar): Require having a test suite
 | 
| +  if options.suite_name:
 | 
| +    options.suite_name = [options.suite_name]
 | 
| +  else:
 | 
| +    options.suite_name = [s for s in gtest_config.STABLE_TEST_SUITES]
 | 
|  
 | 
|  
 | 
| -def AddDeviceOptions(parser):
 | 
| -  """Adds device options to |parser|."""
 | 
| -  group = parser.add_argument_group(title='Device Options')
 | 
| -  group.add_argument('-c', dest='cleanup_test_files',
 | 
| -                     help='Cleanup test files on the device after run',
 | 
| -                     action='store_true')
 | 
| -  group.add_argument('--tool',
 | 
| -                     dest='tool',
 | 
| -                     help=('Run the test under a tool '
 | 
| -                           '(use --tool help to list them)'))
 | 
| -  group.add_argument('-d', '--device', dest='test_device',
 | 
| -                     help=('Target device for the test suite '
 | 
| -                           'to run on.'))
 | 
| -
 | 
| -
 | 
| -def AddGTestOptions(parser):
 | 
| -  """Adds gtest options to |parser|."""
 | 
| -
 | 
| -  gtest_suites = list(gtest_config.STABLE_TEST_SUITES
 | 
| -                      + gtest_config.EXPERIMENTAL_TEST_SUITES)
 | 
| -
 | 
| -  group = parser.add_argument_group('GTest Options')
 | 
| -  group.add_argument('-s', '--suite', dest='suite_name', choices=gtest_suites,
 | 
| -                     nargs='+', metavar='SUITE_NAME', required=True,
 | 
| -                     help=('Executable name of the test suite to run.'))
 | 
| -  group.add_argument('-f', '--gtest_filter', '--gtest-filter',
 | 
| -                     dest='test_filter',
 | 
| -                     help='googletest-style filter string.')
 | 
| -  group.add_argument('--gtest_also_run_disabled_tests',
 | 
| -                     '--gtest-also-run-disabled-tests',
 | 
| -                     dest='run_disabled', action='store_true',
 | 
| -                     help='Also run disabled tests if applicable.')
 | 
| -  group.add_argument('-a', '--test-arguments', dest='test_arguments',
 | 
| -                     default='',
 | 
| -                     help='Additional arguments to pass to the test.')
 | 
| -  group.add_argument('-t', dest='timeout', type=int, default=60,
 | 
| -                     help='Timeout to wait for each test '
 | 
| -                          '(default: %(default)s).')
 | 
| -  group.add_argument('--isolate_file_path',
 | 
| -                     '--isolate-file-path',
 | 
| -                     dest='isolate_file_path',
 | 
| -                     help='.isolate file path to override the default '
 | 
| -                          'path')
 | 
| -  AddDeviceOptions(parser)
 | 
| -  AddCommonOptions(parser)
 | 
| -
 | 
| -
 | 
| -def AddLinkerTestOptions(parser):
 | 
| -  group = parser.add_argument_group('Linker Test Options')
 | 
| -  group.add_argument('-f', '--gtest-filter', dest='test_filter',
 | 
| -                     help='googletest-style filter string.')
 | 
| -  AddCommonOptions(parser)
 | 
| -  AddDeviceOptions(parser)
 | 
| -
 | 
| -
 | 
| -def AddJavaTestOptions(argument_group):
 | 
| +def AddJavaTestOptions(option_parser):
 | 
|    """Adds the Java test options to |option_parser|."""
 | 
|  
 | 
| -  argument_group.add_argument(
 | 
| -      '-f', '--test-filter', dest='test_filter',
 | 
| -      help=('Test filter (if not fully qualified, will run all matches).'))
 | 
| -  argument_group.add_argument(
 | 
| +  option_parser.add_option('-f', '--test-filter', dest='test_filter',
 | 
| +                           help=('Test filter (if not fully qualified, '
 | 
| +                                 'will run all matches).'))
 | 
| +  option_parser.add_option(
 | 
|        '-A', '--annotation', dest='annotation_str',
 | 
|        help=('Comma-separated list of annotations. Run only tests with any of '
 | 
|              'the given annotations. An annotation can be either a key or a '
 | 
|              'key-values pair. A test that has no annotation is considered '
 | 
|              '"SmallTest".'))
 | 
| -  argument_group.add_argument(
 | 
| +  option_parser.add_option(
 | 
|        '-E', '--exclude-annotation', dest='exclude_annotation_str',
 | 
|        help=('Comma-separated list of annotations. Exclude tests with these '
 | 
|              'annotations.'))
 | 
| -  argument_group.add_argument(
 | 
| +  option_parser.add_option(
 | 
|        '--screenshot', dest='screenshot_failures', action='store_true',
 | 
|        help='Capture screenshots of test failures')
 | 
| -  argument_group.add_argument(
 | 
| +  option_parser.add_option(
 | 
|        '--save-perf-json', action='store_true',
 | 
|        help='Saves the JSON file for each UI Perf test.')
 | 
| -  argument_group.add_argument(
 | 
| +  option_parser.add_option(
 | 
|        '--official-build', action='store_true', help='Run official build tests.')
 | 
| -  argument_group.add_argument(
 | 
| +  option_parser.add_option(
 | 
|        '--test_data', '--test-data', action='append', default=[],
 | 
|        help=('Each instance defines a directory of test data that should be '
 | 
|              'copied to the target(s) before running the tests. The argument '
 | 
| @@ -207,360 +235,411 @@ def AddJavaTestOptions(argument_group):
 | 
|              'chromium build directory.'))
 | 
|  
 | 
|  
 | 
| -def ProcessJavaTestOptions(args):
 | 
| +def ProcessJavaTestOptions(options):
 | 
|    """Processes options/arguments and populates |options| with defaults."""
 | 
|  
 | 
| -  # TODO(jbudorick): Handle most of this function in argparse.
 | 
| -  if args.annotation_str:
 | 
| -    args.annotations = args.annotation_str.split(',')
 | 
| -  elif args.test_filter:
 | 
| -    args.annotations = []
 | 
| +  if options.annotation_str:
 | 
| +    options.annotations = options.annotation_str.split(',')
 | 
| +  elif options.test_filter:
 | 
| +    options.annotations = []
 | 
|    else:
 | 
| -    args.annotations = ['Smoke', 'SmallTest', 'MediumTest', 'LargeTest',
 | 
| -                        'EnormousTest', 'IntegrationTest']
 | 
| +    options.annotations = ['Smoke', 'SmallTest', 'MediumTest', 'LargeTest',
 | 
| +                           'EnormousTest', 'IntegrationTest']
 | 
|  
 | 
| -  if args.exclude_annotation_str:
 | 
| -    args.exclude_annotations = args.exclude_annotation_str.split(',')
 | 
| +  if options.exclude_annotation_str:
 | 
| +    options.exclude_annotations = options.exclude_annotation_str.split(',')
 | 
|    else:
 | 
| -    args.exclude_annotations = []
 | 
| -
 | 
| -
 | 
| -def AddInstrumentationTestOptions(parser):
 | 
| -  """Adds Instrumentation test options to |parser|."""
 | 
| -
 | 
| -  parser.usage = '%(prog)s [options]'
 | 
| -
 | 
| -  group = parser.add_argument_group('Instrumentation Test Options')
 | 
| -  AddJavaTestOptions(group)
 | 
| -
 | 
| -  java_or_python_group = group.add_mutually_exclusive_group()
 | 
| -  java_or_python_group.add_argument(
 | 
| -      '-j', '--java-only', action='store_false',
 | 
| -      dest='run_python_tests', default=True, help='Run only the Java tests.')
 | 
| -  java_or_python_group.add_argument(
 | 
| -      '-p', '--python-only', action='store_false',
 | 
| -      dest='run_java_tests', default=True,
 | 
| -      help='Run only the host-driven tests.')
 | 
| -
 | 
| -  group.add_argument('--host-driven-root',
 | 
| -                     help='Root of the host-driven tests.')
 | 
| -  group.add_argument('-w', '--wait_debugger', dest='wait_for_debugger',
 | 
| -                     action='store_true',
 | 
| -                     help='Wait for debugger.')
 | 
| -  group.add_argument('--test-apk', dest='test_apk', required=True,
 | 
| -                     help=('The name of the apk containing the tests '
 | 
| -                           '(without the .apk extension; '
 | 
| -                           'e.g. "ContentShellTest").'))
 | 
| -  group.add_argument('--coverage-dir',
 | 
| -                     help=('Directory in which to place all generated '
 | 
| -                           'EMMA coverage files.'))
 | 
| -  group.add_argument('--device-flags', dest='device_flags', default='',
 | 
| -                     help='The relative filepath to a file containing '
 | 
| -                          'command-line flags to set on the device')
 | 
| -  group.add_argument('--isolate_file_path',
 | 
| -                     '--isolate-file-path',
 | 
| -                     dest='isolate_file_path',
 | 
| -                     help='.isolate file path to override the default '
 | 
| -                          'path')
 | 
| -
 | 
| -  AddCommonOptions(parser)
 | 
| -  AddDeviceOptions(parser)
 | 
| -
 | 
| -
 | 
| -def ProcessInstrumentationOptions(args):
 | 
| +    options.exclude_annotations = []
 | 
| +
 | 
| +
 | 
| +def AddInstrumentationTestOptions(option_parser):
 | 
| +  """Adds Instrumentation test options to |option_parser|."""
 | 
| +
 | 
| +  option_parser.usage = '%prog instrumentation [options]'
 | 
| +  option_parser.commands_dict = {}
 | 
| +  option_parser.example = ('%prog instrumentation '
 | 
| +                           '--test-apk=ChromeShellTest')
 | 
| +
 | 
| +  AddJavaTestOptions(option_parser)
 | 
| +  AddCommonOptions(option_parser)
 | 
| +  AddDeviceOptions(option_parser)
 | 
| +
 | 
| +  option_parser.add_option('-j', '--java-only', action='store_true',
 | 
| +                           default=False, help='Run only the Java tests.')
 | 
| +  option_parser.add_option('-p', '--python-only', action='store_true',
 | 
| +                           default=False,
 | 
| +                           help='Run only the host-driven tests.')
 | 
| +  option_parser.add_option('--host-driven-root',
 | 
| +                           help='Root of the host-driven tests.')
 | 
| +  option_parser.add_option('-w', '--wait_debugger', dest='wait_for_debugger',
 | 
| +                           action='store_true',
 | 
| +                           help='Wait for debugger.')
 | 
| +  option_parser.add_option(
 | 
| +      '--test-apk', dest='test_apk',
 | 
| +      help=('The name of the apk containing the tests '
 | 
| +            '(without the .apk extension; e.g. "ContentShellTest").'))
 | 
| +  option_parser.add_option('--coverage-dir',
 | 
| +                           help=('Directory in which to place all generated '
 | 
| +                                 'EMMA coverage files.'))
 | 
| +  option_parser.add_option('--device-flags', dest='device_flags', default='',
 | 
| +                           help='The relative filepath to a file containing '
 | 
| +                                'command-line flags to set on the device')
 | 
| +  option_parser.add_option('--isolate_file_path',
 | 
| +                           '--isolate-file-path',
 | 
| +                           dest='isolate_file_path',
 | 
| +                           help='.isolate file path to override the default '
 | 
| +                                'path')
 | 
| +
 | 
| +
 | 
| +def ProcessInstrumentationOptions(options, error_func):
 | 
|    """Processes options/arguments and populate |options| with defaults.
 | 
|  
 | 
|    Args:
 | 
| -    args: argparse.Namespace object.
 | 
| +    options: optparse.Values object.
 | 
| +    error_func: Function to call with the error message in case of an error.
 | 
|  
 | 
|    Returns:
 | 
|      An InstrumentationOptions named tuple which contains all options relevant to
 | 
|      instrumentation tests.
 | 
|    """
 | 
|  
 | 
| -  ProcessJavaTestOptions(args)
 | 
| +  ProcessJavaTestOptions(options)
 | 
| +
 | 
| +  if options.java_only and options.python_only:
 | 
| +    error_func('Options java_only (-j) and python_only (-p) '
 | 
| +               'are mutually exclusive.')
 | 
| +  options.run_java_tests = True
 | 
| +  options.run_python_tests = True
 | 
| +  if options.java_only:
 | 
| +    options.run_python_tests = False
 | 
| +  elif options.python_only:
 | 
| +    options.run_java_tests = False
 | 
|  
 | 
| -  if not args.host_driven_root:
 | 
| -    args.run_python_tests = False
 | 
| +  if not options.host_driven_root:
 | 
| +    options.run_python_tests = False
 | 
|  
 | 
| -  args.test_apk_path = os.path.join(
 | 
| +  if not options.test_apk:
 | 
| +    error_func('--test-apk must be specified.')
 | 
| +
 | 
| +
 | 
| +  options.test_apk_path = os.path.join(
 | 
|        constants.GetOutDirectory(),
 | 
|        constants.SDK_BUILD_APKS_DIR,
 | 
| -      '%s.apk' % args.test_apk)
 | 
| -  args.test_apk_jar_path = os.path.join(
 | 
| +      '%s.apk' % options.test_apk)
 | 
| +  options.test_apk_jar_path = os.path.join(
 | 
|        constants.GetOutDirectory(),
 | 
|        constants.SDK_BUILD_TEST_JAVALIB_DIR,
 | 
| -      '%s.jar' %  args.test_apk)
 | 
| -  args.test_support_apk_path = '%sSupport%s' % (
 | 
| -      os.path.splitext(args.test_apk_path))
 | 
| +      '%s.jar' % options.test_apk)
 | 
| +  options.test_support_apk_path = '%sSupport%s' % (
 | 
| +      os.path.splitext(options.test_apk_path))
 | 
|  
 | 
| -  args.test_runner = apk_helper.GetInstrumentationName(args.test_apk_path)
 | 
| +  options.test_runner = apk_helper.GetInstrumentationName(options.test_apk_path)
 | 
|  
 | 
| -  # TODO(jbudorick): Get rid of InstrumentationOptions.
 | 
|    return instrumentation_test_options.InstrumentationOptions(
 | 
| -      args.tool,
 | 
| -      args.cleanup_test_files,
 | 
| -      args.annotations,
 | 
| -      args.exclude_annotations,
 | 
| -      args.test_filter,
 | 
| -      args.test_data,
 | 
| -      args.save_perf_json,
 | 
| -      args.screenshot_failures,
 | 
| -      args.wait_for_debugger,
 | 
| -      args.coverage_dir,
 | 
| -      args.test_apk,
 | 
| -      args.test_apk_path,
 | 
| -      args.test_apk_jar_path,
 | 
| -      args.test_runner,
 | 
| -      args.test_support_apk_path,
 | 
| -      args.device_flags,
 | 
| -      args.isolate_file_path
 | 
| +      options.tool,
 | 
| +      options.cleanup_test_files,
 | 
| +      options.annotations,
 | 
| +      options.exclude_annotations,
 | 
| +      options.test_filter,
 | 
| +      options.test_data,
 | 
| +      options.save_perf_json,
 | 
| +      options.screenshot_failures,
 | 
| +      options.wait_for_debugger,
 | 
| +      options.coverage_dir,
 | 
| +      options.test_apk,
 | 
| +      options.test_apk_path,
 | 
| +      options.test_apk_jar_path,
 | 
| +      options.test_runner,
 | 
| +      options.test_support_apk_path,
 | 
| +      options.device_flags,
 | 
| +      options.isolate_file_path
 | 
|        )
 | 
|  
 | 
|  
 | 
| -def AddUIAutomatorTestOptions(parser):
 | 
| -  """Adds UI Automator test options to |parser|."""
 | 
| -
 | 
| -  group = parser.add_argument_group('UIAutomator Test Options')
 | 
| -  AddJavaTestOptions(group)
 | 
| -  group.add_argument(
 | 
| -      '--package', required=True, choices=constants.PACKAGE_INFO.keys(),
 | 
| -      metavar='PACKAGE', help='Package under test.')
 | 
| -  group.add_argument(
 | 
| -      '--test-jar', dest='test_jar', required=True,
 | 
| +def AddUIAutomatorTestOptions(option_parser):
 | 
| +  """Adds UI Automator test options to |option_parser|."""
 | 
| +
 | 
| +  option_parser.usage = '%prog uiautomator [options]'
 | 
| +  option_parser.commands_dict = {}
 | 
| +  option_parser.example = (
 | 
| +      '%prog uiautomator --test-jar=chrome_shell_uiautomator_tests'
 | 
| +      ' --package=chrome_shell')
 | 
| +  option_parser.add_option(
 | 
| +      '--package',
 | 
| +      help=('Package under test. Possible values: %s' %
 | 
| +            constants.PACKAGE_INFO.keys()))
 | 
| +  option_parser.add_option(
 | 
| +      '--test-jar', dest='test_jar',
 | 
|        help=('The name of the dexed jar containing the tests (without the '
 | 
|              '.dex.jar extension). Alternatively, this can be a full path '
 | 
|              'to the jar.'))
 | 
|  
 | 
| -  AddCommonOptions(parser)
 | 
| -  AddDeviceOptions(parser)
 | 
| +  AddJavaTestOptions(option_parser)
 | 
| +  AddCommonOptions(option_parser)
 | 
| +  AddDeviceOptions(option_parser)
 | 
|  
 | 
|  
 | 
| -def ProcessUIAutomatorOptions(args):
 | 
| +def ProcessUIAutomatorOptions(options, error_func):
 | 
|    """Processes UIAutomator options/arguments.
 | 
|  
 | 
|    Args:
 | 
| -    args: argparse.Namespace object.
 | 
| +    options: optparse.Values object.
 | 
| +    error_func: Function to call with the error message in case of an error.
 | 
|  
 | 
|    Returns:
 | 
|      A UIAutomatorOptions named tuple which contains all options relevant to
 | 
|      uiautomator tests.
 | 
|    """
 | 
|  
 | 
| -  ProcessJavaTestOptions(args)
 | 
| +  ProcessJavaTestOptions(options)
 | 
| +
 | 
| +  if not options.package:
 | 
| +    error_func('--package is required.')
 | 
|  
 | 
| -  if os.path.exists(args.test_jar):
 | 
| +  if options.package not in constants.PACKAGE_INFO:
 | 
| +    error_func('Invalid package.')
 | 
| +
 | 
| +  if not options.test_jar:
 | 
| +    error_func('--test-jar must be specified.')
 | 
| +
 | 
| +  if os.path.exists(options.test_jar):
 | 
|      # The dexed JAR is fully qualified, assume the info JAR lives along side.
 | 
| -    args.uiautomator_jar = args.test_jar
 | 
| +    options.uiautomator_jar = options.test_jar
 | 
|    else:
 | 
| -    args.uiautomator_jar = os.path.join(
 | 
| +    options.uiautomator_jar = os.path.join(
 | 
|          constants.GetOutDirectory(),
 | 
|          constants.SDK_BUILD_JAVALIB_DIR,
 | 
| -        '%s.dex.jar' % args.test_jar)
 | 
| -  args.uiautomator_info_jar = (
 | 
| -      args.uiautomator_jar[:args.uiautomator_jar.find('.dex.jar')] +
 | 
| +        '%s.dex.jar' % options.test_jar)
 | 
| +  options.uiautomator_info_jar = (
 | 
| +      options.uiautomator_jar[:options.uiautomator_jar.find('.dex.jar')] +
 | 
|        '_java.jar')
 | 
|  
 | 
|    return uiautomator_test_options.UIAutomatorOptions(
 | 
| -      args.tool,
 | 
| -      args.cleanup_test_files,
 | 
| -      args.annotations,
 | 
| -      args.exclude_annotations,
 | 
| -      args.test_filter,
 | 
| -      args.test_data,
 | 
| -      args.save_perf_json,
 | 
| -      args.screenshot_failures,
 | 
| -      args.uiautomator_jar,
 | 
| -      args.uiautomator_info_jar,
 | 
| -      args.package)
 | 
| -
 | 
| -
 | 
| -def AddJUnitTestOptions(parser):
 | 
| -  """Adds junit test options to |parser|."""
 | 
| -
 | 
| -  group = parser.add_argument_group('JUnit Test Options')
 | 
| -  group.add_argument(
 | 
| -      '-s', '--test-suite', dest='test_suite', required=True,
 | 
| +      options.tool,
 | 
| +      options.cleanup_test_files,
 | 
| +      options.annotations,
 | 
| +      options.exclude_annotations,
 | 
| +      options.test_filter,
 | 
| +      options.test_data,
 | 
| +      options.save_perf_json,
 | 
| +      options.screenshot_failures,
 | 
| +      options.uiautomator_jar,
 | 
| +      options.uiautomator_info_jar,
 | 
| +      options.package)
 | 
| +
 | 
| +
 | 
| +def AddJUnitTestOptions(option_parser):
 | 
| +  """Adds junit test options to |option_parser|."""
 | 
| +  option_parser.usage = '%prog junit -s [test suite name]'
 | 
| +  option_parser.commands_dict = {}
 | 
| +
 | 
| +  option_parser.add_option(
 | 
| +      '-s', '--test-suite', dest='test_suite',
 | 
|        help=('JUnit test suite to run.'))
 | 
| -  group.add_argument(
 | 
| +  option_parser.add_option(
 | 
|        '-f', '--test-filter', dest='test_filter',
 | 
|        help='Filters tests googletest-style.')
 | 
| -  group.add_argument(
 | 
| +  option_parser.add_option(
 | 
|        '--package-filter', dest='package_filter',
 | 
|        help='Filters tests by package.')
 | 
| -  group.add_argument(
 | 
| +  option_parser.add_option(
 | 
|        '--runner-filter', dest='runner_filter',
 | 
|        help='Filters tests by runner class. Must be fully qualified.')
 | 
| -  group.add_argument(
 | 
| -      '--sdk-version', dest='sdk_version', type=int,
 | 
| +  option_parser.add_option(
 | 
| +      '--sdk-version', dest='sdk_version', type="int",
 | 
|        help='The Android SDK version.')
 | 
| -  AddCommonOptions(parser)
 | 
| +  AddCommonOptions(option_parser)
 | 
| +
 | 
|  
 | 
| +def ProcessJUnitTestOptions(options, error_func):
 | 
| +  """Processes all JUnit test options."""
 | 
| +  if not options.test_suite:
 | 
| +    error_func('No test suite specified.')
 | 
| +  return options
 | 
|  
 | 
| -def AddMonkeyTestOptions(parser):
 | 
| -  """Adds monkey test options to |parser|."""
 | 
|  
 | 
| -  group = parser.add_argument_group('Monkey Test Options')
 | 
| -  group.add_argument(
 | 
| -      '--package', required=True, choices=constants.PACKAGE_INFO.keys(),
 | 
| -      metavar='PACKAGE', help='Package under test.')
 | 
| -  group.add_argument(
 | 
| -      '--event-count', default=10000, type=int,
 | 
| -      help='Number of events to generate (default: %(default)s).')
 | 
| -  group.add_argument(
 | 
| +def AddMonkeyTestOptions(option_parser):
 | 
| +  """Adds monkey test options to |option_parser|."""
 | 
| +
 | 
| +  option_parser.usage = '%prog monkey [options]'
 | 
| +  option_parser.commands_dict = {}
 | 
| +  option_parser.example = (
 | 
| +      '%prog monkey --package=chrome_shell')
 | 
| +
 | 
| +  option_parser.add_option(
 | 
| +      '--package',
 | 
| +      help=('Package under test. Possible values: %s' %
 | 
| +            constants.PACKAGE_INFO.keys()))
 | 
| +  option_parser.add_option(
 | 
| +      '--event-count', default=10000, type='int',
 | 
| +      help='Number of events to generate [default: %default].')
 | 
| +  option_parser.add_option(
 | 
|        '--category', default='',
 | 
|        help='A list of allowed categories.')
 | 
| -  group.add_argument(
 | 
| -      '--throttle', default=100, type=int,
 | 
| -      help='Delay between events (ms) (default: %(default)s). ')
 | 
| -  group.add_argument(
 | 
| -      '--seed', type=int,
 | 
| +  option_parser.add_option(
 | 
| +      '--throttle', default=100, type='int',
 | 
| +      help='Delay between events (ms) [default: %default]. ')
 | 
| +  option_parser.add_option(
 | 
| +      '--seed', type='int',
 | 
|        help=('Seed value for pseudo-random generator. Same seed value generates '
 | 
|              'the same sequence of events. Seed is randomized by default.'))
 | 
| -  group.add_argument(
 | 
| +  option_parser.add_option(
 | 
|        '--extra-args', default='',
 | 
| -      help=('String of other args to pass to the command verbatim.'))
 | 
| +      help=('String of other args to pass to the command verbatim '
 | 
| +            '[default: "%default"].'))
 | 
|  
 | 
| -  AddCommonOptions(parser)
 | 
| -  AddDeviceOptions(parser)
 | 
| +  AddCommonOptions(option_parser)
 | 
| +  AddDeviceOptions(option_parser)
 | 
|  
 | 
|  
 | 
| -def ProcessMonkeyTestOptions(args):
 | 
| +def ProcessMonkeyTestOptions(options, error_func):
 | 
|    """Processes all monkey test options.
 | 
|  
 | 
|    Args:
 | 
| -    args: argparse.Namespace object.
 | 
| +    options: optparse.Values object.
 | 
| +    error_func: Function to call with the error message in case of an error.
 | 
|  
 | 
|    Returns:
 | 
|      A MonkeyOptions named tuple which contains all options relevant to
 | 
|      monkey tests.
 | 
|    """
 | 
| -  # TODO(jbudorick): Handle this directly in argparse with nargs='+'
 | 
| -  category = args.category
 | 
| +  if not options.package:
 | 
| +    error_func('--package is required.')
 | 
| +
 | 
| +  if options.package not in constants.PACKAGE_INFO:
 | 
| +    error_func('Invalid package.')
 | 
| +
 | 
| +  category = options.category
 | 
|    if category:
 | 
| -    category = args.category.split(',')
 | 
| +    category = options.category.split(',')
 | 
|  
 | 
| -  # TODO(jbudorick): Get rid of MonkeyOptions.
 | 
|    return monkey_test_options.MonkeyOptions(
 | 
| -      args.verbose_count,
 | 
| -      args.package,
 | 
| -      args.event_count,
 | 
| +      options.verbose_count,
 | 
| +      options.package,
 | 
| +      options.event_count,
 | 
|        category,
 | 
| -      args.throttle,
 | 
| -      args.seed,
 | 
| -      args.extra_args)
 | 
| -
 | 
| -
 | 
| -def AddPerfTestOptions(parser):
 | 
| -  """Adds perf test options to |parser|."""
 | 
| -
 | 
| -  group = parser.add_argument_group('Perf Test Options')
 | 
| -
 | 
| -  class SingleStepAction(argparse.Action):
 | 
| -    def __call__(self, parser, namespace, values, option_string=None):
 | 
| -      if values and not namespace.single_step:
 | 
| -        parser.error('single step command provided, '
 | 
| -                     'but --single-step not specified.')
 | 
| -      elif namespace.single_step and not values:
 | 
| -        parser.error('--single-step specified, '
 | 
| -                     'but no single step command provided.')
 | 
| -      setattr(namespace, self.dest, values)
 | 
| -
 | 
| -  step_group = group.add_mutually_exclusive_group(required=True)
 | 
| -  # TODO(jbudorick): Revise --single-step to use argparse.REMAINDER.
 | 
| -  # This requires removing "--" from client calls.
 | 
| -  step_group.add_argument(
 | 
| -      '--single-step', action='store_true',
 | 
| +      options.throttle,
 | 
| +      options.seed,
 | 
| +      options.extra_args)
 | 
| +
 | 
| +
 | 
| +def AddPerfTestOptions(option_parser):
 | 
| +  """Adds perf test options to |option_parser|."""
 | 
| +
 | 
| +  option_parser.usage = '%prog perf [options]'
 | 
| +  option_parser.commands_dict = {}
 | 
| +  option_parser.example = ('%prog perf '
 | 
| +                           '[--single-step -- command args] or '
 | 
| +                           '[--steps perf_steps.json] or '
 | 
| +                           '[--print-step step]')
 | 
| +
 | 
| +  option_parser.add_option(
 | 
| +      '--single-step',
 | 
| +      action='store_true',
 | 
|        help='Execute the given command with retries, but only print the result '
 | 
|             'for the "most successful" round.')
 | 
| -  step_group.add_argument(
 | 
| +  option_parser.add_option(
 | 
|        '--steps',
 | 
|        help='JSON file containing the list of commands to run.')
 | 
| -  step_group.add_argument(
 | 
| -      '--print-step',
 | 
| -      help='The name of a previously executed perf step to print.')
 | 
| -
 | 
| -  group.add_argument(
 | 
| +  option_parser.add_option(
 | 
| +      '--flaky-steps',
 | 
| +      help=('A JSON file containing steps that are flaky '
 | 
| +            'and will have their exit codes ignored.'))
 | 
| +  option_parser.add_option(
 | 
|        '--output-json-list',
 | 
|        help='Write a simple list of names from --steps into the given file.')
 | 
| -  group.add_argument(
 | 
| +  option_parser.add_option(
 | 
|        '--collect-chartjson-data',
 | 
|        action='store_true',
 | 
|        help='Cache the chartjson output from each step for later use.')
 | 
| -  group.add_argument(
 | 
| +  option_parser.add_option(
 | 
|        '--output-chartjson-data',
 | 
|        default='',
 | 
|        help='Write out chartjson into the given file.')
 | 
| -  group.add_argument(
 | 
| -      '--flaky-steps',
 | 
| -      help=('A JSON file containing steps that are flaky '
 | 
| -            'and will have its exit code ignored.'))
 | 
| -  group.add_argument(
 | 
| +  option_parser.add_option(
 | 
| +      '--print-step',
 | 
| +      help='The name of a previously executed perf step to print.')
 | 
| +  option_parser.add_option(
 | 
|        '--no-timeout', action='store_true',
 | 
|        help=('Do not impose a timeout. Each perf step is responsible for '
 | 
|              'implementing the timeout logic.'))
 | 
| -  group.add_argument(
 | 
| +  option_parser.add_option(
 | 
|        '-f', '--test-filter',
 | 
|        help=('Test filter (will match against the names listed in --steps).'))
 | 
| -  group.add_argument(
 | 
| -      '--dry-run', action='store_true',
 | 
| +  option_parser.add_option(
 | 
| +      '--dry-run',
 | 
| +      action='store_true',
 | 
|        help='Just print the steps without executing.')
 | 
| -  group.add_argument('single_step_command', nargs='*', action=SingleStepAction,
 | 
| -                     help='If --single-step is specified, the command to run.')
 | 
| -  AddCommonOptions(parser)
 | 
| -  AddDeviceOptions(parser)
 | 
| +  AddCommonOptions(option_parser)
 | 
| +  AddDeviceOptions(option_parser)
 | 
|  
 | 
|  
 | 
| -def ProcessPerfTestOptions(args):
 | 
| +def ProcessPerfTestOptions(options, args, error_func):
 | 
|    """Processes all perf test options.
 | 
|  
 | 
|    Args:
 | 
| -    args: argparse.Namespace object.
 | 
| +    options: optparse.Values object.
 | 
| +    error_func: Function to call with the error message in case of an error.
 | 
|  
 | 
|    Returns:
 | 
|      A PerfOptions named tuple which contains all options relevant to
 | 
|      perf tests.
 | 
|    """
 | 
| -  # TODO(jbudorick): Move single_step handling down into the perf tests.
 | 
| -  if args.single_step:
 | 
| -    args.single_step = ' '.join(args.single_step_command)
 | 
| -  # TODO(jbudorick): Get rid of PerfOptions.
 | 
| +  # Only one of steps, print_step or single_step must be provided.
 | 
| +  count = len(filter(None,
 | 
| +                     [options.steps, options.print_step, options.single_step]))
 | 
| +  if count != 1:
 | 
| +    error_func('Please specify one of: --steps, --print-step, --single-step.')
 | 
| +  single_step = None
 | 
| +  if options.single_step:
 | 
| +    single_step = ' '.join(args[2:])
 | 
|    return perf_test_options.PerfOptions(
 | 
| -      args.steps, args.flaky_steps, args.output_json_list,
 | 
| -      args.print_step, args.no_timeout, args.test_filter,
 | 
| -      args.dry_run, args.single_step, args.collect_chartjson_data,
 | 
| -      args.output_chartjson_data)
 | 
| +      options.steps, options.flaky_steps, options.output_json_list,
 | 
| +      options.print_step, options.no_timeout, options.test_filter,
 | 
| +      options.dry_run, single_step, options.collect_chartjson_data,
 | 
| +      options.output_chartjson_data)
 | 
| +
 | 
|  
 | 
| +def AddPythonTestOptions(option_parser):
 | 
| +  option_parser.add_option('-s', '--suite', dest='suite_name',
 | 
| +                           help=('Name of the test suite to run '
 | 
| +                                 '(use -s help to list them).'))
 | 
| +  AddCommonOptions(option_parser)
 | 
|  
 | 
| -def AddPythonTestOptions(parser):
 | 
| -  group = parser.add_argument_group('Python Test Options')
 | 
| -  group.add_argument(
 | 
| -      '-s', '--suite', dest='suite_name', metavar='SUITE_NAME',
 | 
| -      choices=constants.PYTHON_UNIT_TEST_SUITES.keys(),
 | 
| -      help='Name of the test suite to run.')
 | 
| -  AddCommonOptions(parser)
 | 
|  
 | 
| +def ProcessPythonTestOptions(options, error_func):
 | 
| +  if options.suite_name not in constants.PYTHON_UNIT_TEST_SUITES:
 | 
| +    available = ('Available test suites: [%s]' %
 | 
| +                 ', '.join(constants.PYTHON_UNIT_TEST_SUITES.iterkeys()))
 | 
| +    if options.suite_name == 'help':
 | 
| +      print available
 | 
| +    else:
 | 
| +      error_func('"%s" is not a valid suite. %s' %
 | 
| +                 (options.suite_name, available))
 | 
|  
 | 
| -def _RunGTests(args, devices):
 | 
| +
 | 
| +def _RunGTests(options, devices):
 | 
|    """Subcommand of RunTestsCommands which runs gtests."""
 | 
| +  ProcessGTestOptions(options)
 | 
| +
 | 
|    exit_code = 0
 | 
| -  for suite_name in args.suite_name:
 | 
| -    # TODO(jbudorick): Either deprecate multi-suite or move its handling down
 | 
| -    # into the gtest code.
 | 
| +  for suite_name in options.suite_name:
 | 
| +    # TODO(gkanwar): Move this into ProcessGTestOptions once we require -s for
 | 
| +    # the gtest command.
 | 
|      gtest_options = gtest_test_options.GTestOptions(
 | 
| -        args.tool,
 | 
| -        args.cleanup_test_files,
 | 
| -        args.test_filter,
 | 
| -        args.run_disabled,
 | 
| -        args.test_arguments,
 | 
| -        args.timeout,
 | 
| -        args.isolate_file_path,
 | 
| +        options.tool,
 | 
| +        options.cleanup_test_files,
 | 
| +        options.test_filter,
 | 
| +        options.run_disabled,
 | 
| +        options.test_arguments,
 | 
| +        options.timeout,
 | 
| +        options.isolate_file_path,
 | 
|          suite_name)
 | 
|      runner_factory, tests = gtest_setup.Setup(gtest_options, devices)
 | 
|  
 | 
|      results, test_exit_code = test_dispatcher.RunTests(
 | 
|          tests, runner_factory, devices, shard=True, test_timeout=None,
 | 
| -        num_retries=args.num_retries)
 | 
| +        num_retries=options.num_retries)
 | 
|  
 | 
|      if test_exit_code and exit_code != constants.ERROR_EXIT_CODE:
 | 
|        exit_code = test_exit_code
 | 
| @@ -569,10 +648,10 @@ def _RunGTests(args, devices):
 | 
|          results=results,
 | 
|          test_type='Unit test',
 | 
|          test_package=suite_name,
 | 
| -        flakiness_server=args.flakiness_dashboard_server)
 | 
| +        flakiness_server=options.flakiness_dashboard_server)
 | 
|  
 | 
| -    if args.json_results_file:
 | 
| -      json_results.GenerateJsonResultsFile(results, args.json_results_file)
 | 
| +    if options.json_results_file:
 | 
| +      json_results.GenerateJsonResultsFile(results, options.json_results_file)
 | 
|  
 | 
|    if os.path.isdir(constants.ISOLATE_DEPS_DIR):
 | 
|      shutil.rmtree(constants.ISOLATE_DEPS_DIR)
 | 
| @@ -580,57 +659,55 @@ def _RunGTests(args, devices):
 | 
|    return exit_code
 | 
|  
 | 
|  
 | 
| -def _RunLinkerTests(args, devices):
 | 
| +def _RunLinkerTests(options, devices):
 | 
|    """Subcommand of RunTestsCommands which runs linker tests."""
 | 
| -  runner_factory, tests = linker_setup.Setup(args, devices)
 | 
| +  runner_factory, tests = linker_setup.Setup(options, devices)
 | 
|  
 | 
|    results, exit_code = test_dispatcher.RunTests(
 | 
|        tests, runner_factory, devices, shard=True, test_timeout=60,
 | 
| -      num_retries=args.num_retries)
 | 
| +      num_retries=options.num_retries)
 | 
|  
 | 
|    report_results.LogFull(
 | 
|        results=results,
 | 
|        test_type='Linker test',
 | 
|        test_package='ChromiumLinkerTest')
 | 
|  
 | 
| -  if args.json_results_file:
 | 
| -    json_results.GenerateJsonResultsFile(results, args.json_results_file)
 | 
| +  if options.json_results_file:
 | 
| +    json_results.GenerateJsonResultsFile(results, options.json_results_file)
 | 
|  
 | 
|    return exit_code
 | 
|  
 | 
|  
 | 
| -def _RunInstrumentationTests(args, devices):
 | 
| +def _RunInstrumentationTests(options, error_func, devices):
 | 
|    """Subcommand of RunTestsCommands which runs instrumentation tests."""
 | 
| -  logging.info('_RunInstrumentationTests(%s, %s)' % (str(args), str(devices)))
 | 
| +  instrumentation_options = ProcessInstrumentationOptions(options, error_func)
 | 
|  
 | 
| -  instrumentation_options = ProcessInstrumentationOptions(args)
 | 
| -
 | 
| -  if len(devices) > 1 and args.wait_for_debugger:
 | 
| +  if len(devices) > 1 and options.wait_for_debugger:
 | 
|      logging.warning('Debugger can not be sharded, using first available device')
 | 
|      devices = devices[:1]
 | 
|  
 | 
|    results = base_test_result.TestRunResults()
 | 
|    exit_code = 0
 | 
|  
 | 
| -  if args.run_java_tests:
 | 
| +  if options.run_java_tests:
 | 
|      runner_factory, tests = instrumentation_setup.Setup(
 | 
|          instrumentation_options, devices)
 | 
|  
 | 
|      test_results, exit_code = test_dispatcher.RunTests(
 | 
|          tests, runner_factory, devices, shard=True, test_timeout=None,
 | 
| -        num_retries=args.num_retries)
 | 
| +        num_retries=options.num_retries)
 | 
|  
 | 
|      results.AddTestRunResults(test_results)
 | 
|  
 | 
| -  if args.run_python_tests:
 | 
| +  if options.run_python_tests:
 | 
|      runner_factory, tests = host_driven_setup.InstrumentationSetup(
 | 
| -        args.host_driven_root, args.official_build,
 | 
| +        options.host_driven_root, options.official_build,
 | 
|          instrumentation_options)
 | 
|  
 | 
|      if tests:
 | 
|        test_results, test_exit_code = test_dispatcher.RunTests(
 | 
|            tests, runner_factory, devices, shard=True, test_timeout=None,
 | 
| -          num_retries=args.num_retries)
 | 
| +          num_retries=options.num_retries)
 | 
|  
 | 
|        results.AddTestRunResults(test_results)
 | 
|  
 | 
| @@ -638,77 +715,79 @@ def _RunInstrumentationTests(args, devices):
 | 
|        if test_exit_code and exit_code != constants.ERROR_EXIT_CODE:
 | 
|          exit_code = test_exit_code
 | 
|  
 | 
| -  if args.device_flags:
 | 
| -    args.device_flags = os.path.join(constants.DIR_SOURCE_ROOT,
 | 
| -                                     args.device_flags)
 | 
| +  if options.device_flags:
 | 
| +    options.device_flags = os.path.join(constants.DIR_SOURCE_ROOT,
 | 
| +                                        options.device_flags)
 | 
|  
 | 
|    report_results.LogFull(
 | 
|        results=results,
 | 
|        test_type='Instrumentation',
 | 
| -      test_package=os.path.basename(args.test_apk),
 | 
| -      annotation=args.annotations,
 | 
| -      flakiness_server=args.flakiness_dashboard_server)
 | 
| +      test_package=os.path.basename(options.test_apk),
 | 
| +      annotation=options.annotations,
 | 
| +      flakiness_server=options.flakiness_dashboard_server)
 | 
|  
 | 
| -  if args.json_results_file:
 | 
| -    json_results.GenerateJsonResultsFile(results, args.json_results_file)
 | 
| +  if options.json_results_file:
 | 
| +    json_results.GenerateJsonResultsFile(results, options.json_results_file)
 | 
|  
 | 
|    return exit_code
 | 
|  
 | 
|  
 | 
| -def _RunUIAutomatorTests(args, devices):
 | 
| +def _RunUIAutomatorTests(options, error_func, devices):
 | 
|    """Subcommand of RunTestsCommands which runs uiautomator tests."""
 | 
| -  uiautomator_options = ProcessUIAutomatorOptions(args)
 | 
| +  uiautomator_options = ProcessUIAutomatorOptions(options, error_func)
 | 
|  
 | 
|    runner_factory, tests = uiautomator_setup.Setup(uiautomator_options)
 | 
|  
 | 
|    results, exit_code = test_dispatcher.RunTests(
 | 
|        tests, runner_factory, devices, shard=True, test_timeout=None,
 | 
| -      num_retries=args.num_retries)
 | 
| +      num_retries=options.num_retries)
 | 
|  
 | 
|    report_results.LogFull(
 | 
|        results=results,
 | 
|        test_type='UIAutomator',
 | 
| -      test_package=os.path.basename(args.test_jar),
 | 
| -      annotation=args.annotations,
 | 
| -      flakiness_server=args.flakiness_dashboard_server)
 | 
| +      test_package=os.path.basename(options.test_jar),
 | 
| +      annotation=options.annotations,
 | 
| +      flakiness_server=options.flakiness_dashboard_server)
 | 
|  
 | 
| -  if args.json_results_file:
 | 
| -    json_results.GenerateJsonResultsFile(results, args.json_results_file)
 | 
| +  if options.json_results_file:
 | 
| +    json_results.GenerateJsonResultsFile(results, options.json_results_file)
 | 
|  
 | 
|    return exit_code
 | 
|  
 | 
|  
 | 
| -def _RunJUnitTests(args):
 | 
| +def _RunJUnitTests(options, error_func):
 | 
|    """Subcommand of RunTestsCommand which runs junit tests."""
 | 
| -  runner_factory, tests = junit_setup.Setup(args)
 | 
| +  junit_options = ProcessJUnitTestOptions(options, error_func)
 | 
| +  runner_factory, tests = junit_setup.Setup(junit_options)
 | 
|    _, exit_code = junit_dispatcher.RunTests(tests, runner_factory)
 | 
| +
 | 
|    return exit_code
 | 
|  
 | 
|  
 | 
| -def _RunMonkeyTests(args, devices):
 | 
| +def _RunMonkeyTests(options, error_func, devices):
 | 
|    """Subcommand of RunTestsCommands which runs monkey tests."""
 | 
| -  monkey_options = ProcessMonkeyTestOptions(args)
 | 
| +  monkey_options = ProcessMonkeyTestOptions(options, error_func)
 | 
|  
 | 
|    runner_factory, tests = monkey_setup.Setup(monkey_options)
 | 
|  
 | 
|    results, exit_code = test_dispatcher.RunTests(
 | 
|        tests, runner_factory, devices, shard=False, test_timeout=None,
 | 
| -      num_retries=args.num_retries)
 | 
| +      num_retries=options.num_retries)
 | 
|  
 | 
|    report_results.LogFull(
 | 
|        results=results,
 | 
|        test_type='Monkey',
 | 
|        test_package='Monkey')
 | 
|  
 | 
| -  if args.json_results_file:
 | 
| -    json_results.GenerateJsonResultsFile(results, args.json_results_file)
 | 
| +  if options.json_results_file:
 | 
| +    json_results.GenerateJsonResultsFile(results, options.json_results_file)
 | 
|  
 | 
|    return exit_code
 | 
|  
 | 
|  
 | 
| -def _RunPerfTests(args):
 | 
| +def _RunPerfTests(options, args, error_func):
 | 
|    """Subcommand of RunTestsCommands which runs perf tests."""
 | 
| -  perf_options = ProcessPerfTestOptions(args)
 | 
| +  perf_options = ProcessPerfTestOptions(options, args, error_func)
 | 
|  
 | 
|    # Just save a simple json with a list of test names.
 | 
|    if perf_options.output_json_list:
 | 
| @@ -731,15 +810,15 @@ def _RunPerfTests(args):
 | 
|    # which increases throughput but have no affinity.
 | 
|    results, _ = test_dispatcher.RunTests(
 | 
|        tests, runner_factory, devices, shard=False, test_timeout=None,
 | 
| -      num_retries=args.num_retries)
 | 
| +      num_retries=options.num_retries)
 | 
|  
 | 
|    report_results.LogFull(
 | 
|        results=results,
 | 
|        test_type='Perf',
 | 
|        test_package='Perf')
 | 
|  
 | 
| -  if args.json_results_file:
 | 
| -    json_results.GenerateJsonResultsFile(results, args.json_results_file)
 | 
| +  if options.json_results_file:
 | 
| +    json_results.GenerateJsonResultsFile(results, options.json_results_file)
 | 
|  
 | 
|    if perf_options.single_step:
 | 
|      return perf_test_runner.PrintTestOutput('single_step')
 | 
| @@ -751,9 +830,11 @@ def _RunPerfTests(args):
 | 
|    return 0
 | 
|  
 | 
|  
 | 
| -def _RunPythonTests(args):
 | 
| +def _RunPythonTests(options, error_func):
 | 
|    """Subcommand of RunTestsCommand which runs python unit tests."""
 | 
| -  suite_vars = constants.PYTHON_UNIT_TEST_SUITES[args.suite_name]
 | 
| +  ProcessPythonTestOptions(options, error_func)
 | 
| +
 | 
| +  suite_vars = constants.PYTHON_UNIT_TEST_SUITES[options.suite_name]
 | 
|    suite_path = suite_vars['path']
 | 
|    suite_test_modules = suite_vars['test_modules']
 | 
|  
 | 
| @@ -762,7 +843,7 @@ def _RunPythonTests(args):
 | 
|      suite = unittest.TestSuite()
 | 
|      suite.addTests(unittest.defaultTestLoader.loadTestsFromName(m)
 | 
|                     for m in suite_test_modules)
 | 
| -    runner = unittest.TextTestRunner(verbosity=1+args.verbose_count)
 | 
| +    runner = unittest.TextTestRunner(verbosity=1+options.verbose_count)
 | 
|      return 0 if runner.run(suite).wasSuccessful() else 1
 | 
|    finally:
 | 
|      sys.path = sys.path[1:]
 | 
| @@ -791,12 +872,15 @@ def _GetAttachedDevices(test_device=None):
 | 
|    return sorted(attached_devices)
 | 
|  
 | 
|  
 | 
| -def RunTestsCommand(args, parser):
 | 
| +def RunTestsCommand(command, options, args, option_parser):
 | 
|    """Checks test type and dispatches to the appropriate function.
 | 
|  
 | 
|    Args:
 | 
| -    args: argparse.Namespace object.
 | 
| -    parser: argparse.ArgumentParser object.
 | 
| +    command: String indicating the command that was received to trigger
 | 
| +        this function.
 | 
| +    options: optparse.Values object holding the parsed options.
 | 
| +    args: List of extra args from optparse.
 | 
| +    option_parser: optparse.OptionParser object.
 | 
|  
 | 
|    Returns:
 | 
|      Integer indicated exit code.
 | 
| @@ -805,38 +889,47 @@ def RunTestsCommand(args, parser):
 | 
|      Exception: Unknown command name passed in, or an exception from an
 | 
|          individual test runner.
 | 
|    """
 | 
| -  command = args.command
 | 
|  
 | 
| -  ProcessCommonOptions(args)
 | 
| +  # Check for extra arguments
 | 
| +  if len(args) > 2 and command != 'perf':
 | 
| +    option_parser.error('Unrecognized arguments: %s' % (' '.join(args[2:])))
 | 
| +    return constants.ERROR_EXIT_CODE
 | 
| +  if command == 'perf':
 | 
| +    if ((options.single_step and len(args) <= 2) or
 | 
| +        (not options.single_step and len(args) > 2)):
 | 
| +      option_parser.error('Unrecognized arguments: %s' % (' '.join(args)))
 | 
| +      return constants.ERROR_EXIT_CODE
 | 
| +
 | 
| +  ProcessCommonOptions(options, option_parser.error)
 | 
|  
 | 
| -  if args.enable_platform_mode:
 | 
| -    return RunTestsInPlatformMode(args, parser.error)
 | 
| +  if options.enable_platform_mode:
 | 
| +    return RunTestsInPlatformMode(command, options, option_parser)
 | 
|  
 | 
|    if command in constants.LOCAL_MACHINE_TESTS:
 | 
|      devices = []
 | 
|    else:
 | 
| -    devices = _GetAttachedDevices(args.test_device)
 | 
| +    devices = _GetAttachedDevices(options.test_device)
 | 
|  
 | 
|    forwarder.Forwarder.RemoveHostLog()
 | 
|    if not ports.ResetTestServerPortAllocation():
 | 
|      raise Exception('Failed to reset test server port.')
 | 
|  
 | 
|    if command == 'gtest':
 | 
| -    return _RunGTests(args, devices)
 | 
| +    return _RunGTests(options, devices)
 | 
|    elif command == 'linker':
 | 
| -    return _RunLinkerTests(args, devices)
 | 
| +    return _RunLinkerTests(options, devices)
 | 
|    elif command == 'instrumentation':
 | 
| -    return _RunInstrumentationTests(args, devices)
 | 
| +    return _RunInstrumentationTests(options, option_parser.error, devices)
 | 
|    elif command == 'uiautomator':
 | 
| -    return _RunUIAutomatorTests(args, devices)
 | 
| +    return _RunUIAutomatorTests(options, option_parser.error, devices)
 | 
|    elif command == 'junit':
 | 
| -    return _RunJUnitTests(args)
 | 
| +    return _RunJUnitTests(options, option_parser.error)
 | 
|    elif command == 'monkey':
 | 
| -    return _RunMonkeyTests(args, devices)
 | 
| +    return _RunMonkeyTests(options, option_parser.error, devices)
 | 
|    elif command == 'perf':
 | 
| -    return _RunPerfTests(args)
 | 
| +    return _RunPerfTests(options, args, option_parser.error)
 | 
|    elif command == 'python':
 | 
| -    return _RunPythonTests(args)
 | 
| +    return _RunPythonTests(options, option_parser.error)
 | 
|    else:
 | 
|      raise Exception('Unknown test type.')
 | 
|  
 | 
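The extra-argument checks above lean on optparse's leftover-positional behavior. A sketch of the assumed calling convention (argv fed in whole, so index 0 is the script and index 1 the command):

import optparse
import sys

parser = optparse.OptionParser()
options, args = parser.parse_args(sys.argv)
# optparse returns unconsumed positionals in |args|; under this
# convention args[0] is the script path, args[1] is the command, and
# args[2:] are the extras that RunTestsCommand validates.
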
| @@ -847,60 +940,97 @@ _SUPPORTED_IN_PLATFORM_MODE = [
 | 
|  ]
 | 
|  
 | 
|  
 | 
| -def RunTestsInPlatformMode(args, parser):
 | 
| +def RunTestsInPlatformMode(command, options, option_parser):
 | 
|  
 | 
| -  if args.command not in _SUPPORTED_IN_PLATFORM_MODE:
 | 
| -    parser.error('%s is not yet supported in platform mode' % args.command)
 | 
| +  if command not in _SUPPORTED_IN_PLATFORM_MODE:
 | 
| +    option_parser.error('%s is not yet supported in platform mode' % command)
 | 
|  
 | 
| -  with environment_factory.CreateEnvironment(args, parser.error) as env:
 | 
| -    with test_instance_factory.CreateTestInstance(args, parser.error) as test:
 | 
| +  with environment_factory.CreateEnvironment(
 | 
| +      command, options, option_parser.error) as env:
 | 
| +    with test_instance_factory.CreateTestInstance(
 | 
| +        command, options, option_parser.error) as test:
 | 
|        with test_run_factory.CreateTestRun(
 | 
| -          args, env, test, parser.error) as test_run:
 | 
| +          options, env, test, option_parser.error) as test_run:
 | 
|          results = test_run.RunTests()
 | 
|  
 | 
|          report_results.LogFull(
 | 
|              results=results,
 | 
|              test_type=test.TestType(),
 | 
|              test_package=test_run.TestPackage(),
 | 
| -            annotation=args.annotations,
 | 
| -            flakiness_server=args.flakiness_dashboard_server)
 | 
| +            annotation=options.annotations,
 | 
| +            flakiness_server=options.flakiness_dashboard_server)
 | 
|  
 | 
| -        if args.json_results_file:
 | 
| +        if options.json_results_file:
 | 
|            json_results.GenerateJsonResultsFile(
 | 
| -              results, args.json_results_file)
 | 
| +              results, options.json_results_file)
 | 
|  
 | 
|    return results
 | 
|  
 | 
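The nested with-blocks require each factory to return a context manager; a minimal sketch of that shape (the factory below is illustrative, not the real environment_factory API):

import contextlib

@contextlib.contextmanager
def CreateEnvironmentSketch(command, options, error_func):
  # error_func mirrors the real factory signature; an actual factory
  # would call it on bad options. Set up what the test type needs,
  # yield it, and guarantee teardown even if the test run raises.
  env = {'command': command, 'options': options}
  try:
    yield env
  finally:
    env.clear()  # stand-in for real teardown
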
|  
 | 
| -CommandConfigTuple = collections.namedtuple(
 | 
| -    'CommandConfigTuple',
 | 
| -    ['add_options_func', 'help_txt'])
 | 
| +def HelpCommand(command, _options, args, option_parser):
 | 
| +  """Display help for a certain command, or overall help.
 | 
| +
 | 
| +  Args:
 | 
| +    command: String indicating the command that triggered this function.
 | 
| +    options: optparse options object. Unused.
 | 
| +    args: List of extra args from optparse.
 | 
| +    option_parser: optparse.OptionParser object.
 | 
| +
 | 
| +  Returns:
 | 
| +    Integer indicating exit code.
 | 
| +  """
 | 
| +  # If we don't have any args, display overall help
 | 
| +  if len(args) < 3:
 | 
| +    option_parser.print_help()
 | 
| +    return 0
 | 
| +  # If we have too many args, print an error
 | 
| +  if len(args) > 3:
 | 
| +    option_parser.error('Unrecognized arguments: %s' % (' '.join(args[3:])))
 | 
| +    return constants.ERROR_EXIT_CODE
 | 
| +
 | 
| +  command = args[2]
 | 
| +
 | 
| +  if command not in VALID_COMMANDS:
 | 
| +    option_parser.error('Unrecognized command.')
 | 
| +
 | 
| +  # Treat the help command as a special case: it has no dedicated help
 | 
| +  # page of its own, so just print the overall help.
 | 
| +  if command == 'help':
 | 
| +    option_parser.print_help()
 | 
| +    return 0
 | 
| +
 | 
| +  VALID_COMMANDS[command].add_options_func(option_parser)
 | 
| +  option_parser.usage = '%prog ' + command + ' [options]'
 | 
| +  option_parser.commands_dict = {}
 | 
| +  option_parser.print_help()
 | 
| +
 | 
| +  return 0
 | 
| +
 | 
| +
 | 
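With this in place, per-command help flows through the new command itself: 'test_runner.py help' prints the overall usage, while 'test_runner.py help gtest' installs gtest's options and prints just that page (invocation shape inferred from the args[2] handling above).
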
| +# Define a named tuple for the values in the VALID_COMMANDS dictionary so the
 | 
| +# syntax is a bit prettier. The tuple holds two functions: (add options, run
 | 
| +# command).
 | 
| +CommandFunctionTuple = collections.namedtuple(
 | 
| +    'CommandFunctionTuple', ['add_options_func', 'run_command_func'])
 | 
|  VALID_COMMANDS = {
 | 
| -    'gtest': CommandConfigTuple(
 | 
| -        AddGTestOptions,
 | 
| -        'googletest-based C++ tests'),
 | 
| -    'instrumentation': CommandConfigTuple(
 | 
| -        AddInstrumentationTestOptions,
 | 
| -        'InstrumentationTestCase-based Java tests'),
 | 
| -    'uiautomator': CommandConfigTuple(
 | 
| -        AddUIAutomatorTestOptions,
 | 
| -        "Tests that run via Android's uiautomator command"),
 | 
| -    'junit': CommandConfigTuple(
 | 
| -        AddJUnitTestOptions,
 | 
| -        'JUnit4-based Java tests'),
 | 
| -    'monkey': CommandConfigTuple(
 | 
| -        AddMonkeyTestOptions,
 | 
| -        "Tests based on Android's monkey"),
 | 
| -    'perf': CommandConfigTuple(
 | 
| -        AddPerfTestOptions,
 | 
| -        'Performance tests'),
 | 
| -    'python': CommandConfigTuple(
 | 
| -        AddPythonTestOptions,
 | 
| -        'Python tests based on unittest.TestCase'),
 | 
| -    'linker': CommandConfigTuple(
 | 
| -        AddLinkerTestOptions,
 | 
| -        'Linker tests'),
 | 
| -}
 | 
| +    'gtest': CommandFunctionTuple(AddGTestOptions, RunTestsCommand),
 | 
| +    'instrumentation': CommandFunctionTuple(
 | 
| +        AddInstrumentationTestOptions, RunTestsCommand),
 | 
| +    'uiautomator': CommandFunctionTuple(
 | 
| +        AddUIAutomatorTestOptions, RunTestsCommand),
 | 
| +    'junit': CommandFunctionTuple(
 | 
| +        AddJUnitTestOptions, RunTestsCommand),
 | 
| +    'monkey': CommandFunctionTuple(
 | 
| +        AddMonkeyTestOptions, RunTestsCommand),
 | 
| +    'perf': CommandFunctionTuple(
 | 
| +        AddPerfTestOptions, RunTestsCommand),
 | 
| +    'python': CommandFunctionTuple(
 | 
| +        AddPythonTestOptions, RunTestsCommand),
 | 
| +    'linker': CommandFunctionTuple(
 | 
| +        AddLinkerTestOptions, RunTestsCommand),
 | 
| +    'help': CommandFunctionTuple(lambda option_parser: None, HelpCommand)
 | 
| +}
 | 
|  
 | 
|  
 | 
|  def DumpThreadStacks(_signal, _frame):
 | 
| @@ -910,21 +1040,9 @@ def DumpThreadStacks(_signal, _frame):
 | 
|  
 | 
|  def main():
 | 
|    signal.signal(signal.SIGUSR1, DumpThreadStacks)
 | 
| -
 | 
| -  parser = argparse.ArgumentParser()
 | 
| -  command_parsers = parser.add_subparsers(title='test types',
 | 
| -                                          dest='command')
 | 
| -
 | 
| -  for test_type, config in sorted(VALID_COMMANDS.iteritems(),
 | 
| -                                  key=lambda x: x[0]):
 | 
| -    subparser = command_parsers.add_parser(
 | 
| -        test_type, usage='%(prog)s [options]', help=config.help_txt)
 | 
| -    config.add_options_func(subparser)
 | 
| -
 | 
| -  args = parser.parse_args()
 | 
| -  RunTestsCommand(args, parser)
 | 
| -
 | 
| -  return 0
 | 
| +  option_parser = command_option_parser.CommandOptionParser(
 | 
| +      commands_dict=VALID_COMMANDS)
 | 
| +  return command_option_parser.ParseAndExecute(option_parser)
 | 
|  
 | 
|  
 | 
|  if __name__ == '__main__':
 | 
| 
 |
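main() now defers entirely to the new parser helper; a hedged sketch of what ParseAndExecute presumably does with commands_dict (the real implementation lives in pylib/utils/command_option_parser.py and may differ):

import sys

def ParseAndExecuteSketch(option_parser, argv=None):
  argv = argv if argv is not None else sys.argv
  # Peek at the command first so its flags can be installed before
  # parsing; fall back to overall help when no command is given.
  command = argv[1] if len(argv) > 1 else 'help'
  if command not in option_parser.commands_dict:
    option_parser.error('Invalid command.')
  cmd = option_parser.commands_dict[command]
  cmd.add_options_func(option_parser)
  options, args = option_parser.parse_args(argv)
  return cmd.run_command_func(command, options, args, option_parser)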