| OLD | NEW |
| 1 #!/usr/bin/env python | 1 #!/usr/bin/env python |
| 2 # | 2 # |
| 3 # Copyright 2013 The Chromium Authors. All rights reserved. | 3 # Copyright 2013 The Chromium Authors. All rights reserved. |
| 4 # Use of this source code is governed by a BSD-style license that can be | 4 # Use of this source code is governed by a BSD-style license that can be |
| 5 # found in the LICENSE file. | 5 # found in the LICENSE file. |
| 6 | 6 |
| 7 """Runs all types of tests from one unified interface.""" | 7 """Runs all types of tests from one unified interface.""" |
| 8 | 8 |
| 9 import argparse | |
| 10 import collections | 9 import collections |
| 11 import logging | 10 import logging |
| 11 import optparse |
| 12 import os | 12 import os |
| 13 import shutil | 13 import shutil |
| 14 import signal | 14 import signal |
| 15 import sys | 15 import sys |
| 16 import threading | 16 import threading |
| 17 import unittest | 17 import unittest |
| 18 | 18 |
| 19 from pylib import android_commands | 19 from pylib import android_commands |
| 20 from pylib import constants | 20 from pylib import constants |
| 21 from pylib import forwarder | 21 from pylib import forwarder |
| (...skipping 15 matching lines...) |
| 37 from pylib.monkey import setup as monkey_setup | 37 from pylib.monkey import setup as monkey_setup |
| 38 from pylib.monkey import test_options as monkey_test_options | 38 from pylib.monkey import test_options as monkey_test_options |
| 39 from pylib.perf import setup as perf_setup | 39 from pylib.perf import setup as perf_setup |
| 40 from pylib.perf import test_options as perf_test_options | 40 from pylib.perf import test_options as perf_test_options |
| 41 from pylib.perf import test_runner as perf_test_runner | 41 from pylib.perf import test_runner as perf_test_runner |
| 42 from pylib.results import json_results | 42 from pylib.results import json_results |
| 43 from pylib.results import report_results | 43 from pylib.results import report_results |
| 44 from pylib.uiautomator import setup as uiautomator_setup | 44 from pylib.uiautomator import setup as uiautomator_setup |
| 45 from pylib.uiautomator import test_options as uiautomator_test_options | 45 from pylib.uiautomator import test_options as uiautomator_test_options |
| 46 from pylib.utils import apk_helper | 46 from pylib.utils import apk_helper |
| 47 from pylib.utils import command_option_parser |
| 47 from pylib.utils import reraiser_thread | 48 from pylib.utils import reraiser_thread |
| 48 from pylib.utils import run_tests_helper | 49 from pylib.utils import run_tests_helper |
| 49 | 50 |
| 50 | 51 |
| 51 def AddCommonOptions(parser): | 52 def AddCommonOptions(option_parser): |
| 52 """Adds all common options to |parser|.""" | 53 """Adds all common options to |option_parser|.""" |
| 53 | 54 |
| 54 group = parser.add_argument_group('Common Options') | 55 group = optparse.OptionGroup(option_parser, 'Common Options') |
| 55 | |
| 56 default_build_type = os.environ.get('BUILDTYPE', 'Debug') | 56 default_build_type = os.environ.get('BUILDTYPE', 'Debug') |
| 57 | 57 group.add_option('--debug', action='store_const', const='Debug', |
| 58 debug_or_release_group = group.add_mutually_exclusive_group() | 58 dest='build_type', default=default_build_type, |
| 59 debug_or_release_group.add_argument( | 59 help=('If set, run test suites under out/Debug. ' |
| 60 '--debug', action='store_const', const='Debug', dest='build_type', | 60 'Default is env var BUILDTYPE or Debug.')) |
| 61 default=default_build_type, | 61 group.add_option('--release', action='store_const', |
| 62 help=('If set, run test suites under out/Debug. ' | 62 const='Release', dest='build_type', |
| 63 'Default is env var BUILDTYPE or Debug.')) | 63 help=('If set, run test suites under out/Release.' |
| 64 debug_or_release_group.add_argument( | 64 ' Default is env var BUILDTYPE or Debug.')) |
| 65 '--release', action='store_const', const='Release', dest='build_type', | 65 group.add_option('--build-directory', dest='build_directory', |
| 66 help=('If set, run test suites under out/Release. ' | 66 help=('Path to the directory in which build files are' |
| 67 'Default is env var BUILDTYPE or Debug.')) | 67 ' located (should not include build type)')) |
| 68 | 68 group.add_option('--output-directory', dest='output_directory', |
| 69 group.add_argument('--build-directory', dest='build_directory', | 69 help=('Path to the directory in which build files are' |
| 70 help=('Path to the directory in which build files are' | 70 ' located (must include build type). This will take' |
| 71 ' located (should not include build type)')) | 71 ' precedence over --debug, --release and' |
| 72 group.add_argument('--output-directory', dest='output_directory', | 72 ' --build-directory')) |
| 73 help=('Path to the directory in which build files are' | 73 group.add_option('--num_retries', dest='num_retries', type='int', |
| 74 ' located (must include build type). This will take' | 74 default=2, |
| 75 ' precedence over --debug, --release and' | 75 help=('Number of retries for a test before ' |
| 76 ' --build-directory')) | 76 'giving up.')) |
| 77 group.add_argument('--num_retries', dest='num_retries', type=int, default=2, | 77 group.add_option('-v', |
| 78 help=('Number of retries for a test before ' | 78 '--verbose', |
| 79 'giving up (default: %(default)s).')) | 79 dest='verbose_count', |
| 80 group.add_argument('-v', | 80 default=0, |
| 81 '--verbose', | 81 action='count', |
| 82 dest='verbose_count', | 82 help='Verbose level (multiple times for more)') |
| 83 default=0, | 83 group.add_option('--flakiness-dashboard-server', |
| 84 action='count', | 84 dest='flakiness_dashboard_server', |
| 85 help='Verbose level (multiple times for more)') | 85 help=('Address of the server that is hosting the ' |
| 86 group.add_argument('--flakiness-dashboard-server', | 86 'Chrome for Android flakiness dashboard.')) |
| 87 dest='flakiness_dashboard_server', | 87 group.add_option('--enable-platform-mode', action='store_true', |
| 88 help=('Address of the server that is hosting the ' | 88 help=('Run the test scripts in platform mode, which ' |
| 89 'Chrome for Android flakiness dashboard.')) | 89 'conceptually separates the test runner from the ' |
| 90 group.add_argument('--enable-platform-mode', action='store_true', | 90 '"device" (local or remote, real or emulated) on ' |
| 91 help=('Run the test scripts in platform mode, which ' | 91 'which the tests are running. [experimental]')) |
| 92 'conceptually separates the test runner from the ' | 92 group.add_option('-e', '--environment', default='local', |
| 93 '"device" (local or remote, real or emulated) on ' | 93 help=('Test environment to run in. Must be one of: %s' % |
| 94 'which the tests are running. [experimental]')) | 94 ', '.join(constants.VALID_ENVIRONMENTS))) |
| 95 group.add_argument('-e', '--environment', default='local', | 95 group.add_option('--adb-path', |
| 96 choices=constants.VALID_ENVIRONMENTS, | 96 help=('Specify the absolute path of the adb binary that ' |
| 97 help='Test environment to run in (default: %(default)s).') | 97 'should be used.')) |
| 98 group.add_argument('--adb-path', | 98 group.add_option('--json-results-file', dest='json_results_file', |
| 99 help=('Specify the absolute path of the adb binary that ' | 99 help='If set, will dump results in JSON format ' |
| 100 'should be used.')) | 100 'to specified file.') |
| 101 group.add_argument('--json-results-file', dest='json_results_file', | 101 option_parser.add_option_group(group) |
| 102 help='If set, will dump results in JSON form ' | |
| 103 'to specified file.') | |
| 104 | 102 |
| 105 | 103 |
| 106 def ProcessCommonOptions(args): | 104 def ProcessCommonOptions(options, error_func): |
| 107 """Processes and handles all common options.""" | 105 """Processes and handles all common options.""" |
| 108 run_tests_helper.SetLogLevel(args.verbose_count) | 106 run_tests_helper.SetLogLevel(options.verbose_count) |
| 109 constants.SetBuildType(args.build_type) | 107 constants.SetBuildType(options.build_type) |
| 110 if args.build_directory: | 108 if options.build_directory: |
| 111 constants.SetBuildDirectory(args.build_directory) | 109 constants.SetBuildDirectory(options.build_directory) |
| 112 if args.output_directory: | 110 if options.output_directory: |
| 113 constants.SetOutputDirectort(args.output_directory) | 111 constants.SetOutputDirectort(options.output_directory) |
| 114 if args.adb_path: | 112 if options.adb_path: |
| 115 constants.SetAdbPath(args.adb_path) | 113 constants.SetAdbPath(options.adb_path) |
| 116 # Some things such as Forwarder require ADB to be in the environment path. | 114 # Some things such as Forwarder require ADB to be in the environment path. |
| 117 adb_dir = os.path.dirname(constants.GetAdbPath()) | 115 adb_dir = os.path.dirname(constants.GetAdbPath()) |
| 118 if adb_dir and adb_dir not in os.environ['PATH'].split(os.pathsep): | 116 if adb_dir and adb_dir not in os.environ['PATH'].split(os.pathsep): |
| 119 os.environ['PATH'] = adb_dir + os.pathsep + os.environ['PATH'] | 117 os.environ['PATH'] = adb_dir + os.pathsep + os.environ['PATH'] |
| 118 if options.environment not in constants.VALID_ENVIRONMENTS: |
| 119 error_func('--environment must be one of: %s' % |
| 120 ', '.join(constants.VALID_ENVIRONMENTS)) |
| 120 | 121 |
| 121 | 122 |
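These hunks are the heart of the revert: argparse's `add_argument_group` and mutually exclusive groups go away, and validation that argparse expressed declaratively (e.g. `choices=constants.VALID_ENVIRONMENTS`) moves into `ProcessCommonOptions` behind an `error_func` callback. A minimal sketch of the optparse pattern the NEW side returns to, with an abbreviated, illustrative option set rather than the full one registered above:

```python
import optparse

def AddExampleCommonOptions(option_parser):
  # Mirrors the OptionGroup registration pattern above, reduced to two
  # illustrative options.
  group = optparse.OptionGroup(option_parser, 'Common Options')
  group.add_option('--release', action='store_const', const='Release',
                   dest='build_type', default='Debug',
                   help='If set, run test suites under out/Release.')
  group.add_option('-v', '--verbose', dest='verbose_count', default=0,
                   action='count', help='Verbose level (repeat for more).')
  option_parser.add_option_group(group)

parser = optparse.OptionParser(usage='%prog [options]')
AddExampleCommonOptions(parser)
# Unlike argparse's single Namespace, optparse returns a
# (values, leftover_args) pair.
options, args = parser.parse_args(['--release', '-vv'])
assert options.build_type == 'Release' and options.verbose_count == 2
```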
| 122 def AddDeviceOptions(parser): | 123 def AddDeviceOptions(option_parser): |
| 123 """Adds device options to |parser|.""" | 124 group = optparse.OptionGroup(option_parser, 'Device Options') |
| 124 group = parser.add_argument_group(title='Device Options') | 125 group.add_option('-c', dest='cleanup_test_files', |
| 125 group.add_argument('-c', dest='cleanup_test_files', | 126 help='Clean up test files on the device after the run', |
| 126 help='Clean up test files on the device after the run', | 127 action='store_true') |
| 127 action='store_true') | 128 group.add_option('--tool', |
| 128 group.add_argument('--tool', | 129 dest='tool', |
| 129 dest='tool', | 130 help=('Run the test under a tool ' |
| 130 help=('Run the test under a tool ' | 131 '(use --tool help to list them)')) |
| 131 '(use --tool help to list them)')) | 132 group.add_option('-d', '--device', dest='test_device', |
| 132 group.add_argument('-d', '--device', dest='test_device', | 133 help=('Target device for the test suite ' |
| 133 help=('Target device for the test suite ' | 134 'to run on.')) |
| 134 'to run on.')) | 135 option_parser.add_option_group(group) |
| 135 | 136 |
| 136 | 137 |
| 137 def AddGTestOptions(parser): | 138 def AddGTestOptions(option_parser): |
| 138 """Adds gtest options to |parser|.""" | 139 """Adds gtest options to |option_parser|.""" |
| 139 | 140 |
| 140 gtest_suites = list(gtest_config.STABLE_TEST_SUITES | 141 option_parser.usage = '%prog gtest [options]' |
| 141 + gtest_config.EXPERIMENTAL_TEST_SUITES) | 142 option_parser.commands_dict = {} |
| 143 option_parser.example = '%prog gtest -s base_unittests' |
| 142 | 144 |
| 143 group = parser.add_argument_group('GTest Options') | 145 # TODO(gkanwar): Make this option required |
| 144 group.add_argument('-s', '--suite', dest='suite_name', choices=gtest_suites, | 146 option_parser.add_option('-s', '--suite', dest='suite_name', |
| 145 nargs='+', metavar='SUITE_NAME', required=True, | 147 help=('Executable name of the test suite to run ' |
| 146 help=('Executable name of the test suite to run.')) | 148 '(use -s help to list them).')) |
| 147 group.add_argument('-f', '--gtest_filter', '--gtest-filter', | 149 option_parser.add_option('-f', '--gtest_filter', '--gtest-filter', |
| 148 dest='test_filter', | 150 dest='test_filter', |
| 149 help='googletest-style filter string.') | 151 help='googletest-style filter string.') |
| 150 group.add_argument('--gtest_also_run_disabled_tests', | 152 option_parser.add_option('--gtest_also_run_disabled_tests', |
| 151 '--gtest-also-run-disabled-tests', | 153 '--gtest-also-run-disabled-tests', |
| 152 dest='run_disabled', action='store_true', | 154 dest='run_disabled', action='store_true', |
| 153 help='Also run disabled tests if applicable.') | 155 help='Also run disabled tests if applicable.') |
| 154 group.add_argument('-a', '--test-arguments', dest='test_arguments', | 156 option_parser.add_option('-a', '--test-arguments', dest='test_arguments', |
| 155 default='', | 157 default='', |
| 156 help='Additional arguments to pass to the test.') | 158 help='Additional arguments to pass to the test.') |
| 157 group.add_argument('-t', dest='timeout', type=int, default=60, | 159 option_parser.add_option('-t', dest='timeout', |
| 158 help='Timeout to wait for each test ' | 160 help='Timeout to wait for each test', |
| 159 '(default: %(default)s).') | 161 type='int', |
| 160 group.add_argument('--isolate_file_path', | 162 default=60) |
| 161 '--isolate-file-path', | 163 option_parser.add_option('--isolate_file_path', |
| 162 dest='isolate_file_path', | 164 '--isolate-file-path', |
| 163 help='.isolate file path to override the default ' | 165 dest='isolate_file_path', |
| 164 'path') | 166 help='.isolate file path to override the default ' |
| 165 AddDeviceOptions(parser) | 167 'path') |
| 166 AddCommonOptions(parser) | 168 |
| 169 AddCommonOptions(option_parser) |
| 170 AddDeviceOptions(option_parser) |
| 167 | 171 |
| 168 | 172 |
| 169 def AddLinkerTestOptions(parser): | 173 def AddLinkerTestOptions(option_parser): |
| 170 group = parser.add_argument_group('Linker Test Options') | 174 option_parser.usage = '%prog linker' |
| 171 group.add_argument('-f', '--gtest-filter', dest='test_filter', | 175 option_parser.commands_dict = {} |
| 172 help='googletest-style filter string.') | 176 option_parser.example = '%prog linker' |
| 173 AddCommonOptions(parser) | 177 |
| 174 AddDeviceOptions(parser) | 178 option_parser.add_option('-f', '--gtest-filter', dest='test_filter', |
| 179 help='googletest-style filter string.') |
| 180 AddCommonOptions(option_parser) |
| 181 AddDeviceOptions(option_parser) |
| 175 | 182 |
| 176 | 183 |
| 177 def AddJavaTestOptions(argument_group): | 184 def ProcessGTestOptions(options): |
| 185 """Intercept test suite help to list test suites. |
| 186 |
| 187 Args: |
| 188 options: Command line options. |
| 189 """ |
| 190 if options.suite_name == 'help': |
| 191 print 'Available test suites are:' |
| 192 for test_suite in (gtest_config.STABLE_TEST_SUITES + |
| 193 gtest_config.EXPERIMENTAL_TEST_SUITES): |
| 194 print test_suite |
| 195 sys.exit(0) |
| 196 |
| 197 # Convert to a list, assuming all test suites if nothing was specified. |
| 198 # TODO(gkanwar): Require having a test suite |
| 199 if options.suite_name: |
| 200 options.suite_name = [options.suite_name] |
| 201 else: |
| 202 options.suite_name = [s for s in gtest_config.STABLE_TEST_SUITES] |
| 203 |
| 204 |
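On the NEW side, `-s` is a free-form string rather than an argparse `choices` list, so `ProcessGTestOptions` reserves the literal value `help` to print the available suites and exit. A minimal sketch of that interception, with an illustrative suite list standing in for `gtest_config`'s real contents:

```python
import sys

def MaybeListSuites(suite_name, suites):
  # Mirrors the 'help' interception in ProcessGTestOptions above.
  if suite_name == 'help':
    print 'Available test suites are:'
    for test_suite in suites:
      print test_suite
    sys.exit(0)

# No-op for a normal suite name; prints and exits only for 'help'.
MaybeListSuites('base_unittests', ['base_unittests', 'net_unittests'])
```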
| 205 def AddJavaTestOptions(option_parser): |
| 178 """Adds the Java test options to |option_parser|.""" | 206 """Adds the Java test options to |option_parser|.""" |
| 179 | 207 |
| 180 argument_group.add_argument( | 208 option_parser.add_option('-f', '--test-filter', dest='test_filter', |
| 181 '-f', '--test-filter', dest='test_filter', | 209 help=('Test filter (if not fully qualified, ' |
| 182 help=('Test filter (if not fully qualified, will run all matches).')) | 210 'will run all matches).')) |
| 183 argument_group.add_argument( | 211 option_parser.add_option( |
| 184 '-A', '--annotation', dest='annotation_str', | 212 '-A', '--annotation', dest='annotation_str', |
| 185 help=('Comma-separated list of annotations. Run only tests with any of ' | 213 help=('Comma-separated list of annotations. Run only tests with any of ' |
| 186 'the given annotations. An annotation can be either a key or a ' | 214 'the given annotations. An annotation can be either a key or a ' |
| 187 'key-values pair. A test that has no annotation is considered ' | 215 'key-values pair. A test that has no annotation is considered ' |
| 188 '"SmallTest".')) | 216 '"SmallTest".')) |
| 189 argument_group.add_argument( | 217 option_parser.add_option( |
| 190 '-E', '--exclude-annotation', dest='exclude_annotation_str', | 218 '-E', '--exclude-annotation', dest='exclude_annotation_str', |
| 191 help=('Comma-separated list of annotations. Exclude tests with these ' | 219 help=('Comma-separated list of annotations. Exclude tests with these ' |
| 192 'annotations.')) | 220 'annotations.')) |
| 193 argument_group.add_argument( | 221 option_parser.add_option( |
| 194 '--screenshot', dest='screenshot_failures', action='store_true', | 222 '--screenshot', dest='screenshot_failures', action='store_true', |
| 195 help='Capture screenshots of test failures') | 223 help='Capture screenshots of test failures') |
| 196 argument_group.add_argument( | 224 option_parser.add_option( |
| 197 '--save-perf-json', action='store_true', | 225 '--save-perf-json', action='store_true', |
| 198 help='Saves the JSON file for each UI Perf test.') | 226 help='Saves the JSON file for each UI Perf test.') |
| 199 argument_group.add_argument( | 227 option_parser.add_option( |
| 200 '--official-build', action='store_true', help='Run official build tests.') | 228 '--official-build', action='store_true', help='Run official build tests.') |
| 201 argument_group.add_argument( | 229 option_parser.add_option( |
| 202 '--test_data', '--test-data', action='append', default=[], | 230 '--test_data', '--test-data', action='append', default=[], |
| 203 help=('Each instance defines a directory of test data that should be ' | 231 help=('Each instance defines a directory of test data that should be ' |
| 204 'copied to the target(s) before running the tests. The argument ' | 232 'copied to the target(s) before running the tests. The argument ' |
| 205 'should be of the form <target>:<source>, <target> is relative to ' | 233 'should be of the form <target>:<source>, <target> is relative to ' |
| 206 'the device data directory, and <source> is relative to the ' | 234 'the device data directory, and <source> is relative to the ' |
| 207 'chromium build directory.')) | 235 'chromium build directory.')) |
| 208 | 236 |
| 209 | 237 |
| 210 def ProcessJavaTestOptions(args): | 238 def ProcessJavaTestOptions(options): |
| 211 """Processes options/arguments and populates |options| with defaults.""" | 239 """Processes options/arguments and populates |options| with defaults.""" |
| 212 | 240 |
| 213 # TODO(jbudorick): Handle most of this function in argparse. | 241 if options.annotation_str: |
| 214 if args.annotation_str: | 242 options.annotations = options.annotation_str.split(',') |
| 215 args.annotations = args.annotation_str.split(',') | 243 elif options.test_filter: |
| 216 elif args.test_filter: | 244 options.annotations = [] |
| 217 args.annotations = [] | |
| 218 else: | 245 else: |
| 219 args.annotations = ['Smoke', 'SmallTest', 'MediumTest', 'LargeTest', | 246 options.annotations = ['Smoke', 'SmallTest', 'MediumTest', 'LargeTest', |
| 220 'EnormousTest', 'IntegrationTest'] | 247 'EnormousTest', 'IntegrationTest'] |
| 221 | 248 |
| 222 if args.exclude_annotation_str: | 249 if options.exclude_annotation_str: |
| 223 args.exclude_annotations = args.exclude_annotation_str.split(',') | 250 options.exclude_annotations = options.exclude_annotation_str.split(',') |
| 224 else: | 251 else: |
| 225 args.exclude_annotations = [] | 252 options.exclude_annotations = [] |
| 226 | 253 |
| 227 | 254 |
| 228 def AddInstrumentationTestOptions(parser): | 255 def AddInstrumentationTestOptions(option_parser): |
| 229 """Adds Instrumentation test options to |parser|.""" | 256 """Adds Instrumentation test options to |option_parser|.""" |
| 230 | 257 |
| 231 parser.usage = '%(prog)s [options]' | 258 option_parser.usage = '%prog instrumentation [options]' |
| 232 | 259 option_parser.commands_dict = {} |
| 233 group = parser.add_argument_group('Instrumentation Test Options') | 260 option_parser.example = ('%prog instrumentation ' |
| 234 AddJavaTestOptions(group) | 261 '--test-apk=ChromeShellTest') |
| 235 | 262 |
| 236 java_or_python_group = group.add_mutually_exclusive_group() | 263 AddJavaTestOptions(option_parser) |
| 237 java_or_python_group.add_argument( | 264 AddCommonOptions(option_parser) |
| 238 '-j', '--java-only', action='store_false', | 265 AddDeviceOptions(option_parser) |
| 239 dest='run_python_tests', default=True, help='Run only the Java tests.') | 266 |
| 240 java_or_python_group.add_argument( | 267 option_parser.add_option('-j', '--java-only', action='store_true', |
| 241 '-p', '--python-only', action='store_false', | 268 default=False, help='Run only the Java tests.') |
| 242 dest='run_java_tests', default=True, | 269 option_parser.add_option('-p', '--python-only', action='store_true', |
| 243 help='Run only the host-driven tests.') | 270 default=False, |
| 244 | 271 help='Run only the host-driven tests.') |
| 245 group.add_argument('--host-driven-root', | 272 option_parser.add_option('--host-driven-root', |
| 246 help='Root of the host-driven tests.') | 273 help='Root of the host-driven tests.') |
| 247 group.add_argument('-w', '--wait_debugger', dest='wait_for_debugger', | 274 option_parser.add_option('-w', '--wait_debugger', dest='wait_for_debugger', |
| 248 action='store_true', | 275 action='store_true', |
| 249 help='Wait for debugger.') | 276 help='Wait for debugger.') |
| 250 group.add_argument('--test-apk', dest='test_apk', required=True, | 277 option_parser.add_option( |
| 251 help=('The name of the apk containing the tests ' | 278 '--test-apk', dest='test_apk', |
| 252 '(without the .apk extension; ' | 279 help=('The name of the apk containing the tests ' |
| 253 'e.g. "ContentShellTest").')) | 280 '(without the .apk extension; e.g. "ContentShellTest").')) |
| 254 group.add_argument('--coverage-dir', | 281 option_parser.add_option('--coverage-dir', |
| 255 help=('Directory in which to place all generated ' | 282 help=('Directory in which to place all generated ' |
| 256 'EMMA coverage files.')) | 283 'EMMA coverage files.')) |
| 257 group.add_argument('--device-flags', dest='device_flags', default='', | 284 option_parser.add_option('--device-flags', dest='device_flags', default='', |
| 258 help='The relative filepath to a file containing ' | 285 help='The relative filepath to a file containing ' |
| 259 'command-line flags to set on the device') | 286 'command-line flags to set on the device') |
| 260 group.add_argument('--isolate_file_path', | 287 option_parser.add_option('--isolate_file_path', |
| 261 '--isolate-file-path', | 288 '--isolate-file-path', |
| 262 dest='isolate_file_path', | 289 dest='isolate_file_path', |
| 263 help='.isolate file path to override the default ' | 290 help='.isolate file path to override the default ' |
| 264 'path') | 291 'path') |
| 265 | 292 |
| 266 AddCommonOptions(parser) | 293 |
| 267 AddDeviceOptions(parser) | 294 def ProcessInstrumentationOptions(options, error_func): |
| 268 | |
| 269 | |
| 270 def ProcessInstrumentationOptions(args): | |
| 271 """Processes options/arguments and populate |options| with defaults. | 295 """Processes options/arguments and populate |options| with defaults. |
| 272 | 296 |
| 273 Args: | 297 Args: |
| 274 args: argparse.Namespace object. | 298 options: optparse.Values object. |
| 299 error_func: Function to call with the error message in case of an error. |
| 275 | 300 |
| 276 Returns: | 301 Returns: |
| 277 An InstrumentationOptions named tuple which contains all options relevant to | 302 An InstrumentationOptions named tuple which contains all options relevant to |
| 278 instrumentation tests. | 303 instrumentation tests. |
| 279 """ | 304 """ |
| 280 | 305 |
| 281 ProcessJavaTestOptions(args) | 306 ProcessJavaTestOptions(options) |
| 282 | 307 |
| 283 if not args.host_driven_root: | 308 if options.java_only and options.python_only: |
| 284 args.run_python_tests = False | 309 error_func('Options java_only (-j) and python_only (-p) ' |
| 285 | 310 'are mutually exclusive.') |
| 286 args.test_apk_path = os.path.join( | 311 options.run_java_tests = True |
| 312 options.run_python_tests = True |
| 313 if options.java_only: |
| 314 options.run_python_tests = False |
| 315 elif options.python_only: |
| 316 options.run_java_tests = False |
| 317 |
| 318 if not options.host_driven_root: |
| 319 options.run_python_tests = False |
| 320 |
| 321 if not options.test_apk: |
| 322 error_func('--test-apk must be specified.') |
| 323 |
| 324 |
| 325 options.test_apk_path = os.path.join( |
| 287 constants.GetOutDirectory(), | 326 constants.GetOutDirectory(), |
| 288 constants.SDK_BUILD_APKS_DIR, | 327 constants.SDK_BUILD_APKS_DIR, |
| 289 '%s.apk' % args.test_apk) | 328 '%s.apk' % options.test_apk) |
| 290 args.test_apk_jar_path = os.path.join( | 329 options.test_apk_jar_path = os.path.join( |
| 291 constants.GetOutDirectory(), | 330 constants.GetOutDirectory(), |
| 292 constants.SDK_BUILD_TEST_JAVALIB_DIR, | 331 constants.SDK_BUILD_TEST_JAVALIB_DIR, |
| 293 '%s.jar' % args.test_apk) | 332 '%s.jar' % options.test_apk) |
| 294 args.test_support_apk_path = '%sSupport%s' % ( | 333 options.test_support_apk_path = '%sSupport%s' % ( |
| 295 os.path.splitext(args.test_apk_path)) | 334 os.path.splitext(options.test_apk_path)) |
| 296 | 335 |
| 297 args.test_runner = apk_helper.GetInstrumentationName(args.test_apk_path) | 336 options.test_runner = apk_helper.GetInstrumentationName(options.test_apk_path) |
| 298 | 337 |
| 299 # TODO(jbudorick): Get rid of InstrumentationOptions. | |
| 300 return instrumentation_test_options.InstrumentationOptions( | 338 return instrumentation_test_options.InstrumentationOptions( |
| 301 args.tool, | 339 options.tool, |
| 302 args.cleanup_test_files, | 340 options.cleanup_test_files, |
| 303 args.annotations, | 341 options.annotations, |
| 304 args.exclude_annotations, | 342 options.exclude_annotations, |
| 305 args.test_filter, | 343 options.test_filter, |
| 306 args.test_data, | 344 options.test_data, |
| 307 args.save_perf_json, | 345 options.save_perf_json, |
| 308 args.screenshot_failures, | 346 options.screenshot_failures, |
| 309 args.wait_for_debugger, | 347 options.wait_for_debugger, |
| 310 args.coverage_dir, | 348 options.coverage_dir, |
| 311 args.test_apk, | 349 options.test_apk, |
| 312 args.test_apk_path, | 350 options.test_apk_path, |
| 313 args.test_apk_jar_path, | 351 options.test_apk_jar_path, |
| 314 args.test_runner, | 352 options.test_runner, |
| 315 args.test_support_apk_path, | 353 options.test_support_apk_path, |
| 316 args.device_flags, | 354 options.device_flags, |
| 317 args.isolate_file_path | 355 options.isolate_file_path |
| 318 ) | 356 ) |
| 319 | 357 |
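As the docstring above notes, both sides funnel the processed values into an `InstrumentationOptions` named tuple. A reduced sketch of that container pattern; the field list here is abbreviated and illustrative, not the full set passed in the return statement:

```python
import collections

# Stand-in for pylib's InstrumentationOptions; the real tuple carries
# every field passed above.
ExampleInstrumentationOptions = collections.namedtuple(
    'ExampleInstrumentationOptions',
    ['tool', 'annotations', 'test_apk', 'test_apk_path'])

opts = ExampleInstrumentationOptions(
    tool=None,
    annotations=['SmallTest'],
    test_apk='ContentShellTest',
    test_apk_path='out/Debug/apks/ContentShellTest.apk')
assert opts.test_apk == 'ContentShellTest'  # immutable attribute access
```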
| 320 | 358 |
| 321 def AddUIAutomatorTestOptions(parser): | 359 def AddUIAutomatorTestOptions(option_parser): |
| 322 """Adds UI Automator test options to |parser|.""" | 360 """Adds UI Automator test options to |option_parser|.""" |
| 323 | 361 |
| 324 group = parser.add_argument_group('UIAutomator Test Options') | 362 option_parser.usage = '%prog uiautomator [options]' |
| 325 AddJavaTestOptions(group) | 363 option_parser.commands_dict = {} |
| 326 group.add_argument( | 364 option_parser.example = ( |
| 327 '--package', required=True, choices=constants.PACKAGE_INFO.keys(), | 365 '%prog uiautomator --test-jar=chrome_shell_uiautomator_tests' |
| 328 metavar='PACKAGE', help='Package under test.') | 366 ' --package=chrome_shell') |
| 329 group.add_argument( | 367 option_parser.add_option( |
| 330 '--test-jar', dest='test_jar', required=True, | 368 '--package', |
| 369 help=('Package under test. Possible values: %s' % |
| 370 constants.PACKAGE_INFO.keys())) |
| 371 option_parser.add_option( |
| 372 '--test-jar', dest='test_jar', |
| 331 help=('The name of the dexed jar containing the tests (without the ' | 373 help=('The name of the dexed jar containing the tests (without the ' |
| 332 '.dex.jar extension). Alternatively, this can be a full path ' | 374 '.dex.jar extension). Alternatively, this can be a full path ' |
| 333 'to the jar.')) | 375 'to the jar.')) |
| 334 | 376 |
| 335 AddCommonOptions(parser) | 377 AddJavaTestOptions(option_parser) |
| 336 AddDeviceOptions(parser) | 378 AddCommonOptions(option_parser) |
| 337 | 379 AddDeviceOptions(option_parser) |
| 338 | 380 |
| 339 def ProcessUIAutomatorOptions(args): | 381 |
| 382 def ProcessUIAutomatorOptions(options, error_func): |
| 340 """Processes UIAutomator options/arguments. | 383 """Processes UIAutomator options/arguments. |
| 341 | 384 |
| 342 Args: | 385 Args: |
| 343 args: argparse.Namespace object. | 386 options: optparse.Values object. |
| 387 error_func: Function to call with the error message in case of an error. |
| 344 | 388 |
| 345 Returns: | 389 Returns: |
| 346 A UIAutomatorOptions named tuple which contains all options relevant to | 390 A UIAutomatorOptions named tuple which contains all options relevant to |
| 347 uiautomator tests. | 391 uiautomator tests. |
| 348 """ | 392 """ |
| 349 | 393 |
| 350 ProcessJavaTestOptions(args) | 394 ProcessJavaTestOptions(options) |
| 351 | 395 |
| 352 if os.path.exists(args.test_jar): | 396 if not options.package: |
| 397 error_func('--package is required.') |
| 398 |
| 399 if options.package not in constants.PACKAGE_INFO: |
| 400 error_func('Invalid package.') |
| 401 |
| 402 if not options.test_jar: |
| 403 error_func('--test-jar must be specified.') |
| 404 |
| 405 if os.path.exists(options.test_jar): |
| 353 # The dexed JAR is fully qualified, assume the info JAR lives alongside. | 406 # The dexed JAR is fully qualified, assume the info JAR lives alongside. |
| 354 args.uiautomator_jar = args.test_jar | 407 options.uiautomator_jar = options.test_jar |
| 355 else: | 408 else: |
| 356 args.uiautomator_jar = os.path.join( | 409 options.uiautomator_jar = os.path.join( |
| 357 constants.GetOutDirectory(), | 410 constants.GetOutDirectory(), |
| 358 constants.SDK_BUILD_JAVALIB_DIR, | 411 constants.SDK_BUILD_JAVALIB_DIR, |
| 359 '%s.dex.jar' % args.test_jar) | 412 '%s.dex.jar' % options.test_jar) |
| 360 args.uiautomator_info_jar = ( | 413 options.uiautomator_info_jar = ( |
| 361 args.uiautomator_jar[:args.uiautomator_jar.find('.dex.jar')] + | 414 options.uiautomator_jar[:options.uiautomator_jar.find('.dex.jar')] + |
| 362 '_java.jar') | 415 '_java.jar') |
| 363 | 416 |
| 364 return uiautomator_test_options.UIAutomatorOptions( | 417 return uiautomator_test_options.UIAutomatorOptions( |
| 365 args.tool, | 418 options.tool, |
| 366 args.cleanup_test_files, | 419 options.cleanup_test_files, |
| 367 args.annotations, | 420 options.annotations, |
| 368 args.exclude_annotations, | 421 options.exclude_annotations, |
| 369 args.test_filter, | 422 options.test_filter, |
| 370 args.test_data, | 423 options.test_data, |
| 371 args.save_perf_json, | 424 options.save_perf_json, |
| 372 args.screenshot_failures, | 425 options.screenshot_failures, |
| 373 args.uiautomator_jar, | 426 options.uiautomator_jar, |
| 374 args.uiautomator_info_jar, | 427 options.uiautomator_info_jar, |
| 375 args.package) | 428 options.package) |
| 376 | 429 |
| 377 | 430 |
| 378 def AddJUnitTestOptions(parser): | 431 def AddJUnitTestOptions(option_parser): |
| 379 """Adds junit test options to |parser|.""" | 432 """Adds junit test options to |option_parser|.""" |
| 380 | 433 option_parser.usage = '%prog junit -s [test suite name]' |
| 381 group = parser.add_argument_group('JUnit Test Options') | 434 option_parser.commands_dict = {} |
| 382 group.add_argument( | 435 |
| 383 '-s', '--test-suite', dest='test_suite', required=True, | 436 option_parser.add_option( |
| 437 '-s', '--test-suite', dest='test_suite', |
| 384 help=('JUnit test suite to run.')) | 438 help=('JUnit test suite to run.')) |
| 385 group.add_argument( | 439 option_parser.add_option( |
| 386 '-f', '--test-filter', dest='test_filter', | 440 '-f', '--test-filter', dest='test_filter', |
| 387 help='Filters tests googletest-style.') | 441 help='Filters tests googletest-style.') |
| 388 group.add_argument( | 442 option_parser.add_option( |
| 389 '--package-filter', dest='package_filter', | 443 '--package-filter', dest='package_filter', |
| 390 help='Filters tests by package.') | 444 help='Filters tests by package.') |
| 391 group.add_argument( | 445 option_parser.add_option( |
| 392 '--runner-filter', dest='runner_filter', | 446 '--runner-filter', dest='runner_filter', |
| 393 help='Filters tests by runner class. Must be fully qualified.') | 447 help='Filters tests by runner class. Must be fully qualified.') |
| 394 group.add_argument( | 448 option_parser.add_option( |
| 395 '--sdk-version', dest='sdk_version', type=int, | 449 '--sdk-version', dest='sdk_version', type='int', |
| 396 help='The Android SDK version.') | 450 help='The Android SDK version.') |
| 397 AddCommonOptions(parser) | 451 AddCommonOptions(option_parser) |
| 398 | 452 |
| 399 | 453 |
| 400 def AddMonkeyTestOptions(parser): | 454 def ProcessJUnitTestOptions(options, error_func): |
| 401 """Adds monkey test options to |parser|.""" | 455 """Processes all JUnit test options.""" |
| 402 | 456 if not options.test_suite: |
| 403 group = parser.add_argument_group('Monkey Test Options') | 457 error_func('No test suite specified.') |
| 404 group.add_argument( | 458 return options |
| 405 '--package', required=True, choices=constants.PACKAGE_INFO.keys(), | 459 |
| 406 metavar='PACKAGE', help='Package under test.') | 460 |
| 407 group.add_argument( | 461 def AddMonkeyTestOptions(option_parser): |
| 408 '--event-count', default=10000, type=int, | 462 """Adds monkey test options to |option_parser|.""" |
| 409 help='Number of events to generate (default: %(default)s).') | 463 |
| 410 group.add_argument( | 464 option_parser.usage = '%prog monkey [options]' |
| 465 option_parser.commands_dict = {} |
| 466 option_parser.example = ( |
| 467 '%prog monkey --package=chrome_shell') |
| 468 |
| 469 option_parser.add_option( |
| 470 '--package', |
| 471 help=('Package under test. Possible values: %s' % |
| 472 constants.PACKAGE_INFO.keys())) |
| 473 option_parser.add_option( |
| 474 '--event-count', default=10000, type='int', |
| 475 help='Number of events to generate [default: %default].') |
| 476 option_parser.add_option( |
| 411 '--category', default='', | 477 '--category', default='', |
| 412 help='A list of allowed categories.') | 478 help='A list of allowed categories.') |
| 413 group.add_argument( | 479 option_parser.add_option( |
| 414 '--throttle', default=100, type=int, | 480 '--throttle', default=100, type='int', |
| 415 help='Delay between events (ms) (default: %(default)s). ') | 481 help='Delay between events (ms) [default: %default]. ') |
| 416 group.add_argument( | 482 option_parser.add_option( |
| 417 '--seed', type=int, | 483 '--seed', type='int', |
| 418 help=('Seed value for pseudo-random generator. Same seed value generates ' | 484 help=('Seed value for pseudo-random generator. Same seed value generates ' |
| 419 'the same sequence of events. Seed is randomized by default.')) | 485 'the same sequence of events. Seed is randomized by default.')) |
| 420 group.add_argument( | 486 option_parser.add_option( |
| 421 '--extra-args', default='', | 487 '--extra-args', default='', |
| 422 help=('String of other args to pass to the command verbatim.')) | 488 help=('String of other args to pass to the command verbatim ' |
| 423 | 489 '[default: "%default"].')) |
| 424 AddCommonOptions(parser) | 490 |
| 425 AddDeviceOptions(parser) | 491 AddCommonOptions(option_parser) |
| 426 | 492 AddDeviceOptions(option_parser) |
| 427 | 493 |
| 428 def ProcessMonkeyTestOptions(args): | 494 |
| 495 def ProcessMonkeyTestOptions(options, error_func): |
| 429 """Processes all monkey test options. | 496 """Processes all monkey test options. |
| 430 | 497 |
| 431 Args: | 498 Args: |
| 432 args: argparse.Namespace object. | 499 options: optparse.Values object. |
| 500 error_func: Function to call with the error message in case of an error. |
| 433 | 501 |
| 434 Returns: | 502 Returns: |
| 435 A MonkeyOptions named tuple which contains all options relevant to | 503 A MonkeyOptions named tuple which contains all options relevant to |
| 436 monkey tests. | 504 monkey tests. |
| 437 """ | 505 """ |
| 438 # TODO(jbudorick): Handle this directly in argparse with nargs='+' | 506 if not options.package: |
| 439 category = args.category | 507 error_func('--package is required.') |
| 508 |
| 509 if options.package not in constants.PACKAGE_INFO: |
| 510 error_func('Invalid package.') |
| 511 |
| 512 category = options.category |
| 440 if category: | 513 if category: |
| 441 category = args.category.split(',') | 514 category = options.category.split(',') |
| 442 | 515 |
| 443 # TODO(jbudorick): Get rid of MonkeyOptions. | |
| 444 return monkey_test_options.MonkeyOptions( | 516 return monkey_test_options.MonkeyOptions( |
| 445 args.verbose_count, | 517 options.verbose_count, |
| 446 args.package, | 518 options.package, |
| 447 args.event_count, | 519 options.event_count, |
| 448 category, | 520 category, |
| 449 args.throttle, | 521 options.throttle, |
| 450 args.seed, | 522 options.seed, |
| 451 args.extra_args) | 523 options.extra_args) |
| 452 | 524 |
| 453 | 525 |
| 454 def AddPerfTestOptions(parser): | 526 def AddPerfTestOptions(option_parser): |
| 455 """Adds perf test options to |parser|.""" | 527 """Adds perf test options to |option_parser|.""" |
| 456 | 528 |
| 457 group = parser.add_argument_group('Perf Test Options') | 529 option_parser.usage = '%prog perf [options]' |
| 458 | 530 option_parser.commands_dict = {} |
| 459 class SingleStepAction(argparse.Action): | 531 option_parser.example = ('%prog perf ' |
| 460 def __call__(self, parser, namespace, values, option_string=None): | 532 '[--single-step -- command args] or ' |
| 461 if values and not namespace.single_step: | 533 '[--steps perf_steps.json] or ' |
| 462 parser.error('single step command provided, ' | 534 '[--print-step step]') |
| 463 'but --single-step not specified.') | 535 |
| 464 elif namespace.single_step and not values: | 536 option_parser.add_option( |
| 465 parser.error('--single-step specified, ' | 537 '--single-step', |
| 466 'but no single step command provided.') | 538 action='store_true', |
| 467 setattr(namespace, self.dest, values) | |
| 468 | |
| 469 step_group = group.add_mutually_exclusive_group(required=True) | |
| 470 # TODO(jbudorick): Revise --single-step to use argparse.REMAINDER. | |
| 471 # This requires removing "--" from client calls. | |
| 472 step_group.add_argument( | |
| 473 '--single-step', action='store_true', | |
| 474 help='Execute the given command with retries, but only print the result ' | 539 help='Execute the given command with retries, but only print the result ' |
| 475 'for the "most successful" round.') | 540 'for the "most successful" round.') |
| 476 step_group.add_argument( | 541 option_parser.add_option( |
| 477 '--steps', | 542 '--steps', |
| 478 help='JSON file containing the list of commands to run.') | 543 help='JSON file containing the list of commands to run.') |
| 479 step_group.add_argument( | 544 option_parser.add_option( |
| 480 '--print-step', | 545 '--flaky-steps', |
| 481 help='The name of a previously executed perf step to print.') | 546 help=('A JSON file containing steps that are flaky ' |
| 482 | 547 'and will have its exit code ignored.')) |
| 483 group.add_argument( | 548 option_parser.add_option( |
| 484 '--output-json-list', | 549 '--output-json-list', |
| 485 help='Write a simple list of names from --steps into the given file.') | 550 help='Write a simple list of names from --steps into the given file.') |
| 486 group.add_argument( | 551 option_parser.add_option( |
| 487 '--collect-chartjson-data', | 552 '--collect-chartjson-data', |
| 488 action='store_true', | 553 action='store_true', |
| 489 help='Cache the chartjson output from each step for later use.') | 554 help='Cache the chartjson output from each step for later use.') |
| 490 group.add_argument( | 555 option_parser.add_option( |
| 491 '--output-chartjson-data', | 556 '--output-chartjson-data', |
| 492 default='', | 557 default='', |
| 493 help='Write out chartjson into the given file.') | 558 help='Write out chartjson into the given file.') |
| 494 group.add_argument( | 559 option_parser.add_option( |
| 495 '--flaky-steps', | 560 '--print-step', |
| 496 help=('A JSON file containing steps that are flaky ' | 561 help='The name of a previously executed perf step to print.') |
| 497 'and will have its exit code ignored.')) | 562 option_parser.add_option( |
| 498 group.add_argument( | |
| 499 '--no-timeout', action='store_true', | 563 '--no-timeout', action='store_true', |
| 500 help=('Do not impose a timeout. Each perf step is responsible for ' | 564 help=('Do not impose a timeout. Each perf step is responsible for ' |
| 501 'implementing the timeout logic.')) | 565 'implementing the timeout logic.')) |
| 502 group.add_argument( | 566 option_parser.add_option( |
| 503 '-f', '--test-filter', | 567 '-f', '--test-filter', |
| 504 help=('Test filter (will match against the names listed in --steps).')) | 568 help=('Test filter (will match against the names listed in --steps).')) |
| 505 group.add_argument( | 569 option_parser.add_option( |
| 506 '--dry-run', action='store_true', | 570 '--dry-run', |
| 571 action='store_true', |
| 507 help='Just print the steps without executing.') | 572 help='Just print the steps without executing.') |
| 508 group.add_argument('single_step_command', nargs='*', action=SingleStepAction, | 573 AddCommonOptions(option_parser) |
| 509 help='If --single-step is specified, the command to run.') | 574 AddDeviceOptions(option_parser) |
| 510 AddCommonOptions(parser) | 575 |
| 511 AddDeviceOptions(parser) | 576 |
| 512 | 577 def ProcessPerfTestOptions(options, args, error_func): |
| 513 | |
| 514 def ProcessPerfTestOptions(args): | |
| 515 """Processes all perf test options. | 578 """Processes all perf test options. |
| 516 | 579 |
| 517 Args: | 580 Args: |
| 518 args: argparse.Namespace object. | 581 options: optparse.Values object. |
| 582 error_func: Function to call with the error message in case of an error. |
| 519 | 583 |
| 520 Returns: | 584 Returns: |
| 521 A PerfOptions named tuple which contains all options relevant to | 585 A PerfOptions named tuple which contains all options relevant to |
| 522 perf tests. | 586 perf tests. |
| 523 """ | 587 """ |
| 524 # TODO(jbudorick): Move single_step handling down into the perf tests. | 588 # Exactly one of steps, print_step or single_step must be provided. |
| 525 if args.single_step: | 589 count = len(filter(None, |
| 526 args.single_step = ' '.join(args.single_step_command) | 590 [options.steps, options.print_step, options.single_step])) |
| 527 # TODO(jbudorick): Get rid of PerfOptions. | 591 if count != 1: |
| 592 error_func('Please specify one of: --steps, --print-step, --single-step.') |
| 593 single_step = None |
| 594 if options.single_step: |
| 595 single_step = ' '.join(args[2:]) |
| 528 return perf_test_options.PerfOptions( | 596 return perf_test_options.PerfOptions( |
| 529 args.steps, args.flaky_steps, args.output_json_list, | 597 options.steps, options.flaky_steps, options.output_json_list, |
| 530 args.print_step, args.no_timeout, args.test_filter, | 598 options.print_step, options.no_timeout, options.test_filter, |
| 531 args.dry_run, args.single_step, args.collect_chartjson_data, | 599 options.dry_run, single_step, options.collect_chartjson_data, |
| 532 args.output_chartjson_data) | 600 options.output_chartjson_data) |
| 533 | 601 |
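The NEW side replaces the argparse mutually exclusive group with a manual count: `filter(None, ...)` drops the unset options, so exactly one of `--steps`, `--print-step`, and `--single-step` must survive. A self-contained sketch of that check (Python 2 semantics, where `filter` returns a list so `len` applies directly):

```python
def CheckExactlyOneStepOption(steps, print_step, single_step, error_func):
  # Falsy (unset) options are dropped; exactly one must remain.
  count = len(filter(None, [steps, print_step, single_step]))
  if count != 1:
    error_func('Please specify one of: --steps, --print-step, --single-step.')

errors = []
CheckExactlyOneStepOption(None, None, None, errors.append)
assert len(errors) == 1
CheckExactlyOneStepOption('perf_steps.json', None, None, errors.append)
assert len(errors) == 1  # a single valid option adds no error
```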
| 534 | 602 |
| 535 def AddPythonTestOptions(parser): | 603 def AddPythonTestOptions(option_parser): |
| 536 group = parser.add_argument_group('Python Test Options') | 604 option_parser.add_option('-s', '--suite', dest='suite_name', |
| 537 group.add_argument( | 605 help=('Name of the test suite to run' |
| 538 '-s', '--suite', dest='suite_name', metavar='SUITE_NAME', | 605 help=('Name of the test suite to run ' |
| 539 choices=constants.PYTHON_UNIT_TEST_SUITES.keys(), | 607 AddCommonOptions(option_parser) |
| 540 help='Name of the test suite to run.') | 608 |
| 541 AddCommonOptions(parser) | 609 |
| 542 | 610 def ProcessPythonTestOptions(options, error_func): |
| 543 | 611 if options.suite_name not in constants.PYTHON_UNIT_TEST_SUITES: |
| 544 def _RunGTests(args, devices): | 612 available = ('Available test suites: [%s]' % |
| 613 ', '.join(constants.PYTHON_UNIT_TEST_SUITES.iterkeys())) |
| 614 if options.suite_name == 'help': |
| 615 print available |
| 616 else: |
| 617 error_func('"%s" is not a valid suite. %s' % |
| 618 (options.suite_name, available)) |
| 619 |
| 620 |
| 621 def _RunGTests(options, devices): |
| 545 """Subcommand of RunTestsCommands which runs gtests.""" | 622 """Subcommand of RunTestsCommands which runs gtests.""" |
| 623 ProcessGTestOptions(options) |
| 624 |
| 546 exit_code = 0 | 625 exit_code = 0 |
| 547 for suite_name in args.suite_name: | 626 for suite_name in options.suite_name: |
| 548 # TODO(jbudorick): Either deprecate multi-suite or move its handling down | 627 # TODO(gkanwar): Move this into ProcessGTestOptions once we require -s for |
| 549 # into the gtest code. | 628 # the gtest command. |
| 550 gtest_options = gtest_test_options.GTestOptions( | 629 gtest_options = gtest_test_options.GTestOptions( |
| 551 args.tool, | 630 options.tool, |
| 552 args.cleanup_test_files, | 631 options.cleanup_test_files, |
| 553 args.test_filter, | 632 options.test_filter, |
| 554 args.run_disabled, | 633 options.run_disabled, |
| 555 args.test_arguments, | 634 options.test_arguments, |
| 556 args.timeout, | 635 options.timeout, |
| 557 args.isolate_file_path, | 636 options.isolate_file_path, |
| 558 suite_name) | 637 suite_name) |
| 559 runner_factory, tests = gtest_setup.Setup(gtest_options, devices) | 638 runner_factory, tests = gtest_setup.Setup(gtest_options, devices) |
| 560 | 639 |
| 561 results, test_exit_code = test_dispatcher.RunTests( | 640 results, test_exit_code = test_dispatcher.RunTests( |
| 562 tests, runner_factory, devices, shard=True, test_timeout=None, | 641 tests, runner_factory, devices, shard=True, test_timeout=None, |
| 563 num_retries=args.num_retries) | 642 num_retries=options.num_retries) |
| 564 | 643 |
| 565 if test_exit_code and exit_code != constants.ERROR_EXIT_CODE: | 644 if test_exit_code and exit_code != constants.ERROR_EXIT_CODE: |
| 566 exit_code = test_exit_code | 645 exit_code = test_exit_code |
| 567 | 646 |
| 568 report_results.LogFull( | 647 report_results.LogFull( |
| 569 results=results, | 648 results=results, |
| 570 test_type='Unit test', | 649 test_type='Unit test', |
| 571 test_package=suite_name, | 650 test_package=suite_name, |
| 572 flakiness_server=args.flakiness_dashboard_server) | 651 flakiness_server=options.flakiness_dashboard_server) |
| 573 | 652 |
| 574 if args.json_results_file: | 653 if options.json_results_file: |
| 575 json_results.GenerateJsonResultsFile(results, args.json_results_file) | 654 json_results.GenerateJsonResultsFile(results, options.json_results_file) |
| 576 | 655 |
| 577 if os.path.isdir(constants.ISOLATE_DEPS_DIR): | 656 if os.path.isdir(constants.ISOLATE_DEPS_DIR): |
| 578 shutil.rmtree(constants.ISOLATE_DEPS_DIR) | 657 shutil.rmtree(constants.ISOLATE_DEPS_DIR) |
| 579 | 658 |
| 580 return exit_code | 659 return exit_code |
| 581 | 660 |
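`_RunGTests` above (and `_RunInstrumentationTests` below) applies the same escalation rule: a failing suite may raise the exit code, but a previously recorded `constants.ERROR_EXIT_CODE` is never overwritten. A sketch of that rule; the concrete ERROR_EXIT_CODE value here is illustrative, the real one lives in pylib's constants:

```python
ERROR_EXIT_CODE = 1  # illustrative stand-in for constants.ERROR_EXIT_CODE

def EscalateExitCode(current_code, new_code):
  # Mirrors: if test_exit_code and exit_code != constants.ERROR_EXIT_CODE.
  if new_code and current_code != ERROR_EXIT_CODE:
    return new_code
  return current_code

assert EscalateExitCode(0, 2) == 2
assert EscalateExitCode(ERROR_EXIT_CODE, 2) == ERROR_EXIT_CODE
```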
| 582 | 661 |
| 583 def _RunLinkerTests(args, devices): | 662 def _RunLinkerTests(options, devices): |
| 584 """Subcommand of RunTestsCommands which runs linker tests.""" | 663 """Subcommand of RunTestsCommands which runs linker tests.""" |
| 585 runner_factory, tests = linker_setup.Setup(args, devices) | 664 runner_factory, tests = linker_setup.Setup(options, devices) |
| 586 | 665 |
| 587 results, exit_code = test_dispatcher.RunTests( | 666 results, exit_code = test_dispatcher.RunTests( |
| 588 tests, runner_factory, devices, shard=True, test_timeout=60, | 667 tests, runner_factory, devices, shard=True, test_timeout=60, |
| 589 num_retries=args.num_retries) | 668 num_retries=options.num_retries) |
| 590 | 669 |
| 591 report_results.LogFull( | 670 report_results.LogFull( |
| 592 results=results, | 671 results=results, |
| 593 test_type='Linker test', | 672 test_type='Linker test', |
| 594 test_package='ChromiumLinkerTest') | 673 test_package='ChromiumLinkerTest') |
| 595 | 674 |
| 596 if args.json_results_file: | 675 if options.json_results_file: |
| 597 json_results.GenerateJsonResultsFile(results, args.json_results_file) | 676 json_results.GenerateJsonResultsFile(results, options.json_results_file) |
| 598 | 677 |
| 599 return exit_code | 678 return exit_code |
| 600 | 679 |
| 601 | 680 |
| 602 def _RunInstrumentationTests(args, devices): | 681 def _RunInstrumentationTests(options, error_func, devices): |
| 603 """Subcommand of RunTestsCommands which runs instrumentation tests.""" | 682 """Subcommand of RunTestsCommands which runs instrumentation tests.""" |
| 604 logging.info('_RunInstrumentationTests(%s, %s)' % (str(args), str(devices))) | 683 instrumentation_options = ProcessInstrumentationOptions(options, error_func) |
| 605 | 684 |
| 606 instrumentation_options = ProcessInstrumentationOptions(args) | 685 if len(devices) > 1 and options.wait_for_debugger: |
| 607 | |
| 608 if len(devices) > 1 and args.wait_for_debugger: | |
| 609 logging.warning('Debugger can not be sharded, using first available device') | 686 logging.warning('Debugger can not be sharded, using first available device') |
| 610 devices = devices[:1] | 687 devices = devices[:1] |
| 611 | 688 |
| 612 results = base_test_result.TestRunResults() | 689 results = base_test_result.TestRunResults() |
| 613 exit_code = 0 | 690 exit_code = 0 |
| 614 | 691 |
| 615 if args.run_java_tests: | 692 if options.run_java_tests: |
| 616 runner_factory, tests = instrumentation_setup.Setup( | 693 runner_factory, tests = instrumentation_setup.Setup( |
| 617 instrumentation_options, devices) | 694 instrumentation_options, devices) |
| 618 | 695 |
| 619 test_results, exit_code = test_dispatcher.RunTests( | 696 test_results, exit_code = test_dispatcher.RunTests( |
| 620 tests, runner_factory, devices, shard=True, test_timeout=None, | 697 tests, runner_factory, devices, shard=True, test_timeout=None, |
| 621 num_retries=args.num_retries) | 698 num_retries=options.num_retries) |
| 622 | 699 |
| 623 results.AddTestRunResults(test_results) | 700 results.AddTestRunResults(test_results) |
| 624 | 701 |
| 625 if args.run_python_tests: | 702 if options.run_python_tests: |
| 626 runner_factory, tests = host_driven_setup.InstrumentationSetup( | 703 runner_factory, tests = host_driven_setup.InstrumentationSetup( |
| 627 args.host_driven_root, args.official_build, | 704 options.host_driven_root, options.official_build, |
| 628 instrumentation_options) | 705 instrumentation_options) |
| 629 | 706 |
| 630 if tests: | 707 if tests: |
| 631 test_results, test_exit_code = test_dispatcher.RunTests( | 708 test_results, test_exit_code = test_dispatcher.RunTests( |
| 632 tests, runner_factory, devices, shard=True, test_timeout=None, | 709 tests, runner_factory, devices, shard=True, test_timeout=None, |
| 633 num_retries=args.num_retries) | 710 num_retries=options.num_retries) |
| 634 | 711 |
| 635 results.AddTestRunResults(test_results) | 712 results.AddTestRunResults(test_results) |
| 636 | 713 |
| 637 # Only allow exit code escalation | 714 # Only allow exit code escalation |
| 638 if test_exit_code and exit_code != constants.ERROR_EXIT_CODE: | 715 if test_exit_code and exit_code != constants.ERROR_EXIT_CODE: |
| 639 exit_code = test_exit_code | 716 exit_code = test_exit_code |
| 640 | 717 |
| 641 if args.device_flags: | 718 if options.device_flags: |
| 642 args.device_flags = os.path.join(constants.DIR_SOURCE_ROOT, | 719 options.device_flags = os.path.join(constants.DIR_SOURCE_ROOT, |
| 643 args.device_flags) | 720 options.device_flags) |
| 644 | 721 |
| 645 report_results.LogFull( | 722 report_results.LogFull( |
| 646 results=results, | 723 results=results, |
| 647 test_type='Instrumentation', | 724 test_type='Instrumentation', |
| 648 test_package=os.path.basename(args.test_apk), | 725 test_package=os.path.basename(options.test_apk), |
| 649 annotation=args.annotations, | 726 annotation=options.annotations, |
| 650 flakiness_server=args.flakiness_dashboard_server) | 727 flakiness_server=options.flakiness_dashboard_server) |
| 651 | 728 |
| 652 if args.json_results_file: | 729 if options.json_results_file: |
| 653 json_results.GenerateJsonResultsFile(results, args.json_results_file) | 730 json_results.GenerateJsonResultsFile(results, options.json_results_file) |
| 654 | 731 |
| 655 return exit_code | 732 return exit_code |
| 656 | 733 |
| 657 | 734 |
| 658 def _RunUIAutomatorTests(args, devices): | 735 def _RunUIAutomatorTests(options, error_func, devices): |
| 659 """Subcommand of RunTestsCommands which runs uiautomator tests.""" | 736 """Subcommand of RunTestsCommands which runs uiautomator tests.""" |
| 660 uiautomator_options = ProcessUIAutomatorOptions(args) | 737 uiautomator_options = ProcessUIAutomatorOptions(options, error_func) |
| 661 | 738 |
| 662 runner_factory, tests = uiautomator_setup.Setup(uiautomator_options) | 739 runner_factory, tests = uiautomator_setup.Setup(uiautomator_options) |
| 663 | 740 |
| 664 results, exit_code = test_dispatcher.RunTests( | 741 results, exit_code = test_dispatcher.RunTests( |
| 665 tests, runner_factory, devices, shard=True, test_timeout=None, | 742 tests, runner_factory, devices, shard=True, test_timeout=None, |
| 666 num_retries=args.num_retries) | 743 num_retries=options.num_retries) |
| 667 | 744 |
| 668 report_results.LogFull( | 745 report_results.LogFull( |
| 669 results=results, | 746 results=results, |
| 670 test_type='UIAutomator', | 747 test_type='UIAutomator', |
| 671 test_package=os.path.basename(args.test_jar), | 748 test_package=os.path.basename(options.test_jar), |
| 672 annotation=args.annotations, | 749 annotation=options.annotations, |
| 673 flakiness_server=args.flakiness_dashboard_server) | 750 flakiness_server=options.flakiness_dashboard_server) |
| 674 | 751 |
| 675 if args.json_results_file: | 752 if options.json_results_file: |
| 676 json_results.GenerateJsonResultsFile(results, args.json_results_file) | 753 json_results.GenerateJsonResultsFile(results, options.json_results_file) |
| 677 | 754 |
| 678 return exit_code | 755 return exit_code |
| 679 | 756 |
| 680 | 757 |
| 681 def _RunJUnitTests(args): | 758 def _RunJUnitTests(options, error_func): |
| 682 """Subcommand of RunTestsCommand which runs junit tests.""" | 759 """Subcommand of RunTestsCommand which runs junit tests.""" |
| 683 runner_factory, tests = junit_setup.Setup(args) | 760 junit_options = ProcessJUnitTestOptions(options, error_func) |
| 761 runner_factory, tests = junit_setup.Setup(junit_options) |
| 684 _, exit_code = junit_dispatcher.RunTests(tests, runner_factory) | 762 _, exit_code = junit_dispatcher.RunTests(tests, runner_factory) |
| 763 |
| 685 return exit_code | 764 return exit_code |
| 686 | 765 |
| 687 | 766 |
| 688 def _RunMonkeyTests(args, devices): | 767 def _RunMonkeyTests(options, error_func, devices): |
| 689 """Subcommand of RunTestsCommands which runs monkey tests.""" | 768 """Subcommand of RunTestsCommands which runs monkey tests.""" |
| 690 monkey_options = ProcessMonkeyTestOptions(args) | 769 monkey_options = ProcessMonkeyTestOptions(options, error_func) |
| 691 | 770 |
| 692 runner_factory, tests = monkey_setup.Setup(monkey_options) | 771 runner_factory, tests = monkey_setup.Setup(monkey_options) |
| 693 | 772 |
| 694 results, exit_code = test_dispatcher.RunTests( | 773 results, exit_code = test_dispatcher.RunTests( |
| 695 tests, runner_factory, devices, shard=False, test_timeout=None, | 774 tests, runner_factory, devices, shard=False, test_timeout=None, |
| 696 num_retries=args.num_retries) | 775 num_retries=options.num_retries) |
| 697 | 776 |
| 698 report_results.LogFull( | 777 report_results.LogFull( |
| 699 results=results, | 778 results=results, |
| 700 test_type='Monkey', | 779 test_type='Monkey', |
| 701 test_package='Monkey') | 780 test_package='Monkey') |
| 702 | 781 |
| 703 if args.json_results_file: | 782 if options.json_results_file: |
| 704 json_results.GenerateJsonResultsFile(results, args.json_results_file) | 783 json_results.GenerateJsonResultsFile(results, options.json_results_file) |
| 705 | 784 |
| 706 return exit_code | 785 return exit_code |
| 707 | 786 |
| 708 | 787 |
| 709 def _RunPerfTests(args): | 788 def _RunPerfTests(options, args, error_func): |
| 710 """Subcommand of RunTestsCommands which runs perf tests.""" | 789 """Subcommand of RunTestsCommands which runs perf tests.""" |
| 711 perf_options = ProcessPerfTestOptions(args) | 790 perf_options = ProcessPerfTestOptions(options, args, error_func) |
| 712 | 791 |
| 713 # Just save a simple json with a list of test names. | 792 # Just save a simple json with a list of test names. |
| 714 if perf_options.output_json_list: | 793 if perf_options.output_json_list: |
| 715 return perf_test_runner.OutputJsonList( | 794 return perf_test_runner.OutputJsonList( |
| 716 perf_options.steps, perf_options.output_json_list) | 795 perf_options.steps, perf_options.output_json_list) |
| 717 | 796 |
| 718 if perf_options.output_chartjson_data: | 797 if perf_options.output_chartjson_data: |
| 719 return perf_test_runner.OutputChartjson( | 798 return perf_test_runner.OutputChartjson( |
| 720 perf_options.print_step, perf_options.output_chartjson_data) | 799 perf_options.print_step, perf_options.output_chartjson_data) |
| 721 | 800 |
| 722 # Just print the results from a single previously executed step. | 801 # Just print the results from a single previously executed step. |
| 723 if perf_options.print_step: | 802 if perf_options.print_step: |
| 724 return perf_test_runner.PrintTestOutput(perf_options.print_step) | 803 return perf_test_runner.PrintTestOutput(perf_options.print_step) |
| 725 | 804 |
| 726 runner_factory, tests, devices = perf_setup.Setup(perf_options) | 805 runner_factory, tests, devices = perf_setup.Setup(perf_options) |
| 727 | 806 |
| 728 # shard=False means that each device will get the full list of tests | 807 # shard=False means that each device will get the full list of tests |
| 729 # and then each one will decide its own affinity. | 808 # and then each one will decide its own affinity. |
| 730 # shard=True means each device will pop the next test available from a queue, | 809 # shard=True means each device will pop the next test available from a queue, |
| 731 # which increases throughput but has no affinity. | 810 # which increases throughput but has no affinity. |
| 732 results, _ = test_dispatcher.RunTests( | 811 results, _ = test_dispatcher.RunTests( |
| 733 tests, runner_factory, devices, shard=False, test_timeout=None, | 812 tests, runner_factory, devices, shard=False, test_timeout=None, |
| 734 num_retries=args.num_retries) | 813 num_retries=options.num_retries) |
| 735 | 814 |
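The comment above describes the two dispatch modes. A toy, stdlib-only sketch of the difference (illustrative only; the real logic lives in `test_dispatcher.RunTests`, where devices pull work as they finish rather than strictly round-robin):

    import collections

    def _DistributeSketch(tests, devices, shard):
      # shard=False: every device gets the full list and applies its own
      # affinity rules. shard=True: devices drain one shared queue, which
      # raises throughput but gives no test/device affinity.
      if not shard:
        return dict((d, list(tests)) for d in devices)
      queue = collections.deque(tests)
      assignment = dict((d, []) for d in devices)
      while queue:
        for d in devices:
          if not queue:
            break
          assignment[d].append(queue.popleft())
      return assignment

    # _DistributeSketch(['t1', 't2', 't3'], ['a', 'b'], shard=True)
    # -> {'a': ['t1', 't3'], 'b': ['t2']}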
| 736 report_results.LogFull( | 815 report_results.LogFull( |
| 737 results=results, | 816 results=results, |
| 738 test_type='Perf', | 817 test_type='Perf', |
| 739 test_package='Perf') | 818 test_package='Perf') |
| 740 | 819 |
| 741 if args.json_results_file: | 820 if options.json_results_file: |
| 742 json_results.GenerateJsonResultsFile(results, args.json_results_file) | 821 json_results.GenerateJsonResultsFile(results, options.json_results_file) |
| 743 | 822 |
| 744 if perf_options.single_step: | 823 if perf_options.single_step: |
| 745 return perf_test_runner.PrintTestOutput('single_step') | 824 return perf_test_runner.PrintTestOutput('single_step') |
| 746 | 825 |
| 747 perf_test_runner.PrintSummary(tests) | 826 perf_test_runner.PrintSummary(tests) |
| 748 | 827 |
| 749 # Always return 0 on the sharding stage. Individual tests' exit codes | 828 # Always return 0 on the sharding stage. Individual tests' exit codes |
| 750 # will be returned on the print_step stage. | 829 # will be returned on the print_step stage. |
| 751 return 0 | 830 return 0 |
| 752 | 831 |
| 753 | 832 |
| 754 def _RunPythonTests(args): | 833 def _RunPythonTests(options, error_func): |
| 755 """Subcommand of RunTestsCommand which runs python unit tests.""" | 834 """Subcommand of RunTestsCommand which runs python unit tests.""" |
| 756 suite_vars = constants.PYTHON_UNIT_TEST_SUITES[args.suite_name] | 835 ProcessPythonTestOptions(options, error_func) |
| 836 |
| 837 suite_vars = constants.PYTHON_UNIT_TEST_SUITES[options.suite_name] |
| 757 suite_path = suite_vars['path'] | 838 suite_path = suite_vars['path'] |
| 758 suite_test_modules = suite_vars['test_modules'] | 839 suite_test_modules = suite_vars['test_modules'] |
| 759 | 840 |
| 760 sys.path = [suite_path] + sys.path | 841 sys.path = [suite_path] + sys.path |
| 761 try: | 842 try: |
| 762 suite = unittest.TestSuite() | 843 suite = unittest.TestSuite() |
| 763 suite.addTests(unittest.defaultTestLoader.loadTestsFromName(m) | 844 suite.addTests(unittest.defaultTestLoader.loadTestsFromName(m) |
| 764 for m in suite_test_modules) | 845 for m in suite_test_modules) |
| 765 runner = unittest.TextTestRunner(verbosity=1+args.verbose_count) | 846 runner = unittest.TextTestRunner(verbosity=1+options.verbose_count) |
| 766 return 0 if runner.run(suite).wasSuccessful() else 1 | 847 return 0 if runner.run(suite).wasSuccessful() else 1 |
| 767 finally: | 848 finally: |
| 768 sys.path = sys.path[1:] | 849 sys.path = sys.path[1:] |
| 769 | 850 |
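`_RunPythonTests` restores `sys.path` by dropping its first entry, which goes wrong if anything imported while the suite ran also mutated the list. A sketch of a context manager that restores the exact original list (the helper name is illustrative):

    import contextlib
    import sys

    @contextlib.contextmanager
    def _PrependedSysPath(path):
      # Temporarily put |path| first on sys.path; restore the original list
      # on exit even if loading or running the tests raises.
      original = list(sys.path)
      sys.path.insert(0, path)
      try:
        yield
      finally:
        sys.path = original

The loading and running in `_RunPythonTests` would then sit inside a single `with _PrependedSysPath(suite_path):` block.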
| 770 | 851 |
| 771 def _GetAttachedDevices(test_device=None): | 852 def _GetAttachedDevices(test_device=None): |
| 772 """Get all attached devices. | 853 """Get all attached devices. |
| 773 | 854 |
| 774 Args: | 855 Args: |
| 775 test_device: Name of a specific device to use. | 856 test_device: Name of a specific device to use. |
| 776 | 857 |
| 777 Returns: | 858 Returns: |
| 778 A list of attached devices. | 859 A list of attached devices. |
| 779 """ | 860 """ |
| 780 attached_devices = [] | 861 attached_devices = [] |
| 781 | 862 |
| 782 attached_devices = android_commands.GetAttachedDevices() | 863 attached_devices = android_commands.GetAttachedDevices() |
| 783 if test_device: | 864 if test_device: |
| 784 assert test_device in attached_devices, ( | 865 assert test_device in attached_devices, ( |
| 785 'Did not find device %s among attached devices. Attached devices: %s' | 866 'Did not find device %s among attached devices. Attached devices: %s' |
| 786 % (test_device, ', '.join(attached_devices))) | 867 % (test_device, ', '.join(attached_devices))) |
| 787 attached_devices = [test_device] | 868 attached_devices = [test_device] |
| 788 | 869 |
| 789 assert attached_devices, 'No devices attached.' | 870 assert attached_devices, 'No devices attached.' |
| 790 | 871 |
| 791 return sorted(attached_devices) | 872 return sorted(attached_devices) |
| 792 | 873 |
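`_GetAttachedDevices` validates with `assert`, and asserts are stripped when Python runs with -O, so these user-facing checks can silently vanish. A sketch of the same logic with explicit exceptions (the function name is illustrative; `android_commands` is the module's own import):

    from pylib import android_commands

    def _GetAttachedDevicesStrict(test_device=None):
      # Same behavior as _GetAttachedDevices, but the checks survive -O.
      attached_devices = android_commands.GetAttachedDevices()
      if test_device:
        if test_device not in attached_devices:
          raise Exception(
              'Did not find device %s among attached devices: %s'
              % (test_device, ', '.join(attached_devices)))
        attached_devices = [test_device]
      if not attached_devices:
        raise Exception('No devices attached.')
      return sorted(attached_devices)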
| 793 | 874 |
| 794 def RunTestsCommand(args, parser): | 875 def RunTestsCommand(command, options, args, option_parser): |
| 795 """Checks test type and dispatches to the appropriate function. | 876 """Checks test type and dispatches to the appropriate function. |
| 796 | 877 |
| 797 Args: | 878 Args: |
| 798 args: argparse.Namespace object. | 879 command: String indicating the command that was received to trigger |
| 799 parser: argparse.ArgumentParser object. | 880 this function. |
| 881 options: optparse options object. |
| 882 args: List of extra args from optparse. |
| 883 option_parser: optparse.OptionParser object. |
| 800 | 884 |
| 801 Returns: | 885 Returns: |
| 802 Integer indicating exit code. | 886 Integer indicating exit code. |
| 803 | 887 |
| 804 Raises: | 888 Raises: |
| 805 Exception: Unknown command name passed in, or an exception from an | 889 Exception: Unknown command name passed in, or an exception from an |
| 806 individual test runner. | 890 individual test runner. |
| 807 """ | 891 """ |
| 808 command = args.command | |
| 809 | 892 |
| 810 ProcessCommonOptions(args) | 893 # Check for extra arguments |
| 894 if len(args) > 2 and command != 'perf': |
| 895 option_parser.error('Unrecognized arguments: %s' % (' '.join(args[2:]))) |
| 896 return constants.ERROR_EXIT_CODE |
| 897 if command == 'perf': |
| 898 if ((options.single_step and len(args) <= 2) or |
| 899 (not options.single_step and len(args) > 2)): |
| 900 option_parser.error('Unrecognized arguments: %s' % (' '.join(args))) |
| 901 return constants.ERROR_EXIT_CODE |
| 811 | 902 |
| 812 if args.enable_platform_mode: | 903 ProcessCommonOptions(options, option_parser.error) |
| 813 return RunTestsInPlatformMode(args, parser) | 904 |
| 905 if options.enable_platform_mode: |
| 906 return RunTestsInPlatformMode(command, options, option_parser) |
| 814 | 907 |
| 815 if command in constants.LOCAL_MACHINE_TESTS: | 908 if command in constants.LOCAL_MACHINE_TESTS: |
| 816 devices = [] | 909 devices = [] |
| 817 else: | 910 else: |
| 818 devices = _GetAttachedDevices(args.test_device) | 911 devices = _GetAttachedDevices(options.test_device) |
| 819 | 912 |
| 820 forwarder.Forwarder.RemoveHostLog() | 913 forwarder.Forwarder.RemoveHostLog() |
| 821 if not ports.ResetTestServerPortAllocation(): | 914 if not ports.ResetTestServerPortAllocation(): |
| 822 raise Exception('Failed to reset test server port.') | 915 raise Exception('Failed to reset test server port.') |
| 823 | 916 |
| 824 if command == 'gtest': | 917 if command == 'gtest': |
| 825 return _RunGTests(args, devices) | 918 return _RunGTests(options, devices) |
| 826 elif command == 'linker': | 919 elif command == 'linker': |
| 827 return _RunLinkerTests(args, devices) | 920 return _RunLinkerTests(options, devices) |
| 828 elif command == 'instrumentation': | 921 elif command == 'instrumentation': |
| 829 return _RunInstrumentationTests(args, devices) | 922 return _RunInstrumentationTests(options, option_parser.error, devices) |
| 830 elif command == 'uiautomator': | 923 elif command == 'uiautomator': |
| 831 return _RunUIAutomatorTests(args, devices) | 924 return _RunUIAutomatorTests(options, option_parser.error, devices) |
| 832 elif command == 'junit': | 925 elif command == 'junit': |
| 833 return _RunJUnitTests(args) | 926 return _RunJUnitTests(options, option_parser.error) |
| 834 elif command == 'monkey': | 927 elif command == 'monkey': |
| 835 return _RunMonkeyTests(args, devices) | 928 return _RunMonkeyTests(options, option_parser.error, devices) |
| 836 elif command == 'perf': | 929 elif command == 'perf': |
| 837 return _RunPerfTests(args) | 930 return _RunPerfTests(options, args, option_parser.error) |
| 838 elif command == 'python': | 931 elif command == 'python': |
| 839 return _RunPythonTests(args) | 932 return _RunPythonTests(options, option_parser.error) |
| 840 else: | 933 else: |
| 841 raise Exception('Unknown test type.') | 934 raise Exception('Unknown test type.') |
| 842 | 935 |
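The if/elif ladder above is a fixed command-to-handler mapping; because the handlers differ only in whether they take `devices`, a table of adapter lambdas could express the same dispatch. A sketch against the left-hand (argparse) signatures, not a drop-in change:

    # Adapter table: every entry takes (args, devices) even when the
    # underlying runner ignores devices.
    _COMMAND_HANDLERS = {
        'gtest': lambda args, devices: _RunGTests(args, devices),
        'linker': lambda args, devices: _RunLinkerTests(args, devices),
        'instrumentation':
            lambda args, devices: _RunInstrumentationTests(args, devices),
        'uiautomator': lambda args, devices: _RunUIAutomatorTests(args, devices),
        'junit': lambda args, _devices: _RunJUnitTests(args),
        'monkey': lambda args, devices: _RunMonkeyTests(args, devices),
        'perf': lambda args, _devices: _RunPerfTests(args),
        'python': lambda args, _devices: _RunPythonTests(args),
    }

    # handler = _COMMAND_HANDLERS.get(command)
    # if handler is None:
    #   raise Exception('Unknown test type.')
    # return handler(args, devices)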
| 843 | 936 |
| 844 _SUPPORTED_IN_PLATFORM_MODE = [ | 937 _SUPPORTED_IN_PLATFORM_MODE = [ |
| 845 # TODO(jbudorick): Add support for more test types. | 938 # TODO(jbudorick): Add support for more test types. |
| 846 'gtest', | 939 'gtest', |
| 847 ] | 940 ] |
| 848 | 941 |
| 849 | 942 |
| 850 def RunTestsInPlatformMode(args, parser): | 943 def RunTestsInPlatformMode(command, options, option_parser): |
| 851 | 944 |
| 852 if args.command not in _SUPPORTED_IN_PLATFORM_MODE: | 945 if command not in _SUPPORTED_IN_PLATFORM_MODE: |
| 853 parser.error('%s is not yet supported in platform mode' % args.command) | 946 option_parser.error('%s is not yet supported in platform mode' % command) |
| 854 | 947 |
| 855 with environment_factory.CreateEnvironment(args, parser.error) as env: | 948 with environment_factory.CreateEnvironment( |
| 856 with test_instance_factory.CreateTestInstance(args, parser.error) as test: | 949 command, options, option_parser.error) as env: |
| 950 with test_instance_factory.CreateTestInstance( |
| 951 command, options, option_parser.error) as test: |
| 857 with test_run_factory.CreateTestRun( | 952 with test_run_factory.CreateTestRun( |
| 858 args, env, test, parser.error) as test_run: | 953 options, env, test, option_parser.error) as test_run: |
| 859 results = test_run.RunTests() | 954 results = test_run.RunTests() |
| 860 | 955 |
| 861 report_results.LogFull( | 956 report_results.LogFull( |
| 862 results=results, | 957 results=results, |
| 863 test_type=test.TestType(), | 958 test_type=test.TestType(), |
| 864 test_package=test_run.TestPackage(), | 959 test_package=test_run.TestPackage(), |
| 865 annotation=args.annotations, | 960 annotation=options.annotations, |
| 866 flakiness_server=args.flakiness_dashboard_server) | 961 flakiness_server=options.flakiness_dashboard_server) |
| 867 | 962 |
| 868 if args.json_results_file: | 963 if options.json_results_file: |
| 869 json_results.GenerateJsonResultsFile( | 964 json_results.GenerateJsonResultsFile( |
| 870 results, args.json_results_file) | 965 results, options.json_results_file) |
| 871 | 966 |
| 872 return results | 967 return results |
| 873 | 968 |
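The nested `with` statements above guarantee that the environment, test instance, and test run are torn down in reverse order even when `RunTests()` raises, which implies the factory products implement the context-manager protocol. An illustrative sketch of that contract (the SetUp/TearDown method names are assumptions, not the real pylib classes):

    class _ManagedResource(object):
      # Illustrative contract for the objects returned by environment_factory,
      # test_instance_factory, and test_run_factory.
      def SetUp(self):
        pass   # e.g. install APKs, start servers

      def TearDown(self):
        pass   # e.g. uninstall, stop servers

      def __enter__(self):
        self.SetUp()
        return self

      def __exit__(self, exc_type, exc_value, traceback):
        self.TearDown()  # runs even if the body raised
        return False     # never swallow the exception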
| 874 | 969 |
| 875 CommandConfigTuple = collections.namedtuple( | 970 def HelpCommand(command, _options, args, option_parser): |
| 876 'CommandConfigTuple', | 971 """Display help for a certain command, or overall help. |
| 877 ['add_options_func', 'help_txt']) | 972 |
| 973 Args: |
| 974 command: String indicating the command that was received to trigger |
| 975 this function. |
| 976 options: optparse options object. Unused. |
| 977 args: List of extra args from optparse. |
| 978 option_parser: optparse.OptionParser object. |
| 979 |
| 980 Returns: |
| 981 Integer indicating exit code. |
| 982 """ |
| 983 # If we don't have any args, display overall help |
| 984 if len(args) < 3: |
| 985 option_parser.print_help() |
| 986 return 0 |
| 987 # If we have too many args, print an error |
| 988 if len(args) > 3: |
| 989 option_parser.error('Unrecognized arguments: %s' % (' '.join(args[3:]))) |
| 990 return constants.ERROR_EXIT_CODE |
| 991 |
| 992 command = args[2] |
| 993 |
| 994 if command not in VALID_COMMANDS: |
| 995 option_parser.error('Unrecognized command.') |
| 996 |
| 997 # Treat the help command as a special case: we don't show a specific |
| 998 # help page for the help command itself. |
| 999 if command == 'help': |
| 1000 option_parser.print_help() |
| 1001 return 0 |
| 1002 |
| 1003 VALID_COMMANDS[command].add_options_func(option_parser) |
| 1004 option_parser.usage = '%prog ' + command + ' [options]' |
| 1005 option_parser.commands_dict = {} |
| 1006 option_parser.print_help() |
| 1007 |
| 1008 return 0 |
| 1009 |
| 1010 |
| 1011 # Define a named tuple for the values in the VALID_COMMANDS dictionary so the |
| 1012 # syntax is a bit prettier. The tuple is two functions: (add options, run |
| 1013 # command). |
| 1014 CommandFunctionTuple = collections.namedtuple( |
| 1015 'CommandFunctionTuple', ['add_options_func', 'run_command_func']) |
| 878 VALID_COMMANDS = { | 1016 VALID_COMMANDS = { |
| 879 'gtest': CommandConfigTuple( | 1017 'gtest': CommandFunctionTuple(AddGTestOptions, RunTestsCommand), |
| 880 AddGTestOptions, | 1018 'instrumentation': CommandFunctionTuple( |
| 881 'googletest-based C++ tests'), | 1019 AddInstrumentationTestOptions, RunTestsCommand), |
| 882 'instrumentation': CommandConfigTuple( | 1020 'uiautomator': CommandFunctionTuple( |
| 883 AddInstrumentationTestOptions, | 1021 AddUIAutomatorTestOptions, RunTestsCommand), |
| 884 'InstrumentationTestCase-based Java tests'), | 1022 'junit': CommandFunctionTuple( |
| 885 'uiautomator': CommandConfigTuple( | 1023 AddJUnitTestOptions, RunTestsCommand), |
| 886 AddUIAutomatorTestOptions, | 1024 'monkey': CommandFunctionTuple( |
| 887 "Tests that run via Android's uiautomator command"), | 1025 AddMonkeyTestOptions, RunTestsCommand), |
| 888 'junit': CommandConfigTuple( | 1026 'perf': CommandFunctionTuple( |
| 889 AddJUnitTestOptions, | 1027 AddPerfTestOptions, RunTestsCommand), |
| 890 'JUnit4-based Java tests'), | 1028 'python': CommandFunctionTuple( |
| 891 'monkey': CommandConfigTuple( | 1029 AddPythonTestOptions, RunTestsCommand), |
| 892 AddMonkeyTestOptions, | 1030 'linker': CommandFunctionTuple( |
| 893 "Tests based on Android's monkey"), | 1031 AddLinkerTestOptions, RunTestsCommand), |
| 894 'perf': CommandConfigTuple( | 1032 'help': CommandFunctionTuple(lambda option_parser: None, HelpCommand) |
| 895 AddPerfTestOptions, | 1033 } |
| 896 'Performance tests'), | |
| 897 'python': CommandConfigTuple( | |
| 898 AddPythonTestOptions, | |
| 899 'Python tests based on unittest.TestCase'), | |
| 900 'linker': CommandConfigTuple( | |
| 901 AddLinkerTestOptions, | |
| 902 'Linker tests'), | |
| 903 } | |
| 904 | 1034 |
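On the right-hand (optparse) side, each VALID_COMMANDS entry pairs an option-registration function with a run function that `command_option_parser.ParseAndExecute` invokes. A hedged guess at the shape of such a driver, for orientation only (the actual code lives in `pylib.utils.command_option_parser` and may differ):

    import sys

    def _ParseAndExecuteSketch(option_parser, commands):
      # Pick the command, let it register its options, then call it with the
      # (command, options, args, option_parser) signature used above.
      if len(sys.argv) < 2 or sys.argv[1] not in commands:
        option_parser.error('Invalid command.')
      command = sys.argv[1]
      commands[command].add_options_func(option_parser)
      options, args = option_parser.parse_args(sys.argv)
      return commands[command].run_command_func(
          command, options, args, option_parser)

Note that `parse_args(sys.argv)` leaves the script name and the command in `args`, which is why the handlers above index from `args[2]`.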
| 905 | 1035 |
| 906 def DumpThreadStacks(_signal, _frame): | 1036 def DumpThreadStacks(_signal, _frame): |
| 907 for thread in threading.enumerate(): | 1037 for thread in threading.enumerate(): |
| 908 reraiser_thread.LogThreadStack(thread) | 1038 reraiser_thread.LogThreadStack(thread) |
| 909 | 1039 |
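`DumpThreadStacks` gets installed as the SIGUSR1 handler in main(), so a hung run can be inspected from another shell with `kill -USR1 <pid>` (POSIX only; Windows has no SIGUSR1). A stdlib-only sketch of the same trick, independent of pylib:

    import signal
    import sys
    import threading
    import traceback

    def _DumpStacks(_signum, _frame):
      # Print a stack trace for every live thread when SIGUSR1 arrives.
      frames = sys._current_frames()
      for thread in threading.enumerate():
        frame = frames.get(thread.ident)
        if frame:
          print('Stack for thread %s:' % thread.name)
          traceback.print_stack(frame)

    # signal.signal(signal.SIGUSR1, _DumpStacks)  # then: kill -USR1 <pid>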
| 910 | 1040 |
| 911 def main(): | 1041 def main(): |
| 912 signal.signal(signal.SIGUSR1, DumpThreadStacks) | 1042 signal.signal(signal.SIGUSR1, DumpThreadStacks) |
| 913 | 1043 option_parser = command_option_parser.CommandOptionParser( |
| 914 parser = argparse.ArgumentParser() | 1044 commands_dict=VALID_COMMANDS) |
| 915 command_parsers = parser.add_subparsers(title='test types', | 1045 return command_option_parser.ParseAndExecute(option_parser) |
| 916 dest='command') | |
| 917 | |
| 918 for test_type, config in sorted(VALID_COMMANDS.iteritems(), | |
| 919 key=lambda x: x[0]): | |
| 920 subparser = command_parsers.add_parser( | |
| 921 test_type, usage='%(prog)s [options]', help=config.help_txt) | |
| 922 config.add_options_func(subparser) | |
| 923 | |
| 924 args = parser.parse_args() | |
| 925 exit_code = RunTestsCommand(args, parser) | |
| 926 | |
| 927 return exit_code | |
| 928 | 1046 |
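The left-hand main() dispatches on `dest='command'` plus the if/elif chain in RunTestsCommand; argparse can also attach the handler to each subparser with `set_defaults`, a common alternative worth noting. A minimal, self-contained sketch with a toy command (not a real test type):

    import argparse

    def _run_foo(args):
      return 0 if args.num_retries >= 0 else 1

    def _main_sketch(argv=None):
      parser = argparse.ArgumentParser()
      subparsers = parser.add_subparsers(title='test types', dest='command')
      foo = subparsers.add_parser('foo', help='toy command')
      foo.add_argument('--num-retries', type=int, default=2)
      foo.set_defaults(func=_run_foo)  # handler travels with the namespace
      args = parser.parse_args(argv)
      return args.func(args)

    # _main_sketch(['foo', '--num-retries', '1'])  # -> 0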
| 929 | 1047 |
| 930 if __name__ == '__main__': | 1048 if __name__ == '__main__': |
| 931 sys.exit(main()) | 1049 sys.exit(main()) |