OLD | NEW |
1 #!/usr/bin/env python | 1 #!/usr/bin/env python |
2 # | 2 # |
3 # Copyright 2013 The Chromium Authors. All rights reserved. | 3 # Copyright 2013 The Chromium Authors. All rights reserved. |
4 # Use of this source code is governed by a BSD-style license that can be | 4 # Use of this source code is governed by a BSD-style license that can be |
5 # found in the LICENSE file. | 5 # found in the LICENSE file. |
6 | 6 |
7 """Runs all types of tests from one unified interface.""" | 7 """Runs all types of tests from one unified interface.""" |
8 | 8 |
| 9 import argparse |
9 import collections | 10 import collections |
10 import logging | 11 import logging |
11 import optparse | |
12 import os | 12 import os |
13 import shutil | 13 import shutil |
14 import signal | 14 import signal |
15 import sys | 15 import sys |
16 import threading | 16 import threading |
17 import unittest | 17 import unittest |
18 | 18 |
19 from pylib import android_commands | 19 from pylib import android_commands |
20 from pylib import constants | 20 from pylib import constants |
21 from pylib import forwarder | 21 from pylib import forwarder |
(...skipping 15 matching lines...) |
37 from pylib.monkey import setup as monkey_setup | 37 from pylib.monkey import setup as monkey_setup |
38 from pylib.monkey import test_options as monkey_test_options | 38 from pylib.monkey import test_options as monkey_test_options |
39 from pylib.perf import setup as perf_setup | 39 from pylib.perf import setup as perf_setup |
40 from pylib.perf import test_options as perf_test_options | 40 from pylib.perf import test_options as perf_test_options |
41 from pylib.perf import test_runner as perf_test_runner | 41 from pylib.perf import test_runner as perf_test_runner |
42 from pylib.results import json_results | 42 from pylib.results import json_results |
43 from pylib.results import report_results | 43 from pylib.results import report_results |
44 from pylib.uiautomator import setup as uiautomator_setup | 44 from pylib.uiautomator import setup as uiautomator_setup |
45 from pylib.uiautomator import test_options as uiautomator_test_options | 45 from pylib.uiautomator import test_options as uiautomator_test_options |
46 from pylib.utils import apk_helper | 46 from pylib.utils import apk_helper |
47 from pylib.utils import command_option_parser | |
48 from pylib.utils import reraiser_thread | 47 from pylib.utils import reraiser_thread |
49 from pylib.utils import run_tests_helper | 48 from pylib.utils import run_tests_helper |
50 | 49 |
51 | 50 |
52 def AddCommonOptions(option_parser): | 51 def AddCommonOptions(parser): |
53 """Adds all common options to |option_parser|.""" | 52 """Adds all common options to |parser|.""" |
54 | 53 |
55 group = optparse.OptionGroup(option_parser, 'Common Options') | 54 group = parser.add_argument_group('Common Options') |
| 55 |
56 default_build_type = os.environ.get('BUILDTYPE', 'Debug') | 56 default_build_type = os.environ.get('BUILDTYPE', 'Debug') |
57 group.add_option('--debug', action='store_const', const='Debug', | 57 |
58 dest='build_type', default=default_build_type, | 58 debug_or_release_group = group.add_mutually_exclusive_group() |
59 help=('If set, run test suites under out/Debug. ' | 59 debug_or_release_group.add_argument( |
60 'Default is env var BUILDTYPE or Debug.')) | 60 '--debug', action='store_const', const='Debug', dest='build_type', |
61 group.add_option('--release', action='store_const', | 61 default=default_build_type, |
62 const='Release', dest='build_type', | 62 help=('If set, run test suites under out/Debug. ' |
63 help=('If set, run test suites under out/Release.' | 63 'Default is env var BUILDTYPE or Debug.')) |
64 ' Default is env var BUILDTYPE or Debug.')) | 64 debug_or_release_group.add_argument( |
65 group.add_option('--build-directory', dest='build_directory', | 65 '--release', action='store_const', const='Release', dest='build_type', |
66 help=('Path to the directory in which build files are' | 66 help=('If set, run test suites under out/Release. ' |
67 ' located (should not include build type)')) | 67 'Default is env var BUILDTYPE or Debug.')) |
68 group.add_option('--output-directory', dest='output_directory', | 68 |
69 help=('Path to the directory in which build files are' | 69 group.add_argument('--build-directory', dest='build_directory', |
70 ' located (must include build type). This will take' | 70 help=('Path to the directory in which build files are' |
71 ' precedence over --debug, --release and' | 71 ' located (should not include build type)')) |
72 ' --build-directory')) | 72 group.add_argument('--output-directory', dest='output_directory', |
73 group.add_option('--num_retries', dest='num_retries', type='int', | 73 help=('Path to the directory in which build files are' |
74 default=2, | 74 ' located (must include build type). This will take' |
75 help=('Number of retries for a test before ' | 75 ' precedence over --debug, --release and' |
76 'giving up.')) | 76 ' --build-directory')) |
77 group.add_option('-v', | 77 group.add_argument('--num_retries', dest='num_retries', type=int, default=2, |
78 '--verbose', | 78 help=('Number of retries for a test before ' |
79 dest='verbose_count', | 79 'giving up (default: %(default)s).')) |
80 default=0, | 80 group.add_argument('-v', |
81 action='count', | 81 '--verbose', |
82 help='Verbose level (multiple times for more)') | 82 dest='verbose_count', |
83 group.add_option('--flakiness-dashboard-server', | 83 default=0, |
84 dest='flakiness_dashboard_server', | 84 action='count', |
85 help=('Address of the server that is hosting the ' | 85 help='Verbose level (multiple times for more)') |
86 'Chrome for Android flakiness dashboard.')) | 86 group.add_argument('--flakiness-dashboard-server', |
87 group.add_option('--enable-platform-mode', action='store_true', | 87 dest='flakiness_dashboard_server', |
88 help=('Run the test scripts in platform mode, which ' | 88 help=('Address of the server that is hosting the ' |
89 'conceptually separates the test runner from the ' | 89 'Chrome for Android flakiness dashboard.')) |
90 '"device" (local or remote, real or emulated) on ' | 90 group.add_argument('--enable-platform-mode', action='store_true', |
91 'which the tests are running. [experimental]')) | 91 help=('Run the test scripts in platform mode, which ' |
92 group.add_option('-e', '--environment', default='local', | 92 'conceptually separates the test runner from the ' |
93 help=('Test environment to run in. Must be one of: %s' % | 93 '"device" (local or remote, real or emulated) on ' |
94 ', '.join(constants.VALID_ENVIRONMENTS))) | 94 'which the tests are running. [experimental]')) |
95 group.add_option('--adb-path', | 95 group.add_argument('-e', '--environment', default='local', |
96 help=('Specify the absolute path of the adb binary that ' | 96 choices=constants.VALID_ENVIRONMENTS, |
97 'should be used.')) | 97 help='Test environment to run in (default: %(default)s).') |
98 group.add_option('--json-results-file', dest='json_results_file', | 98 group.add_argument('--adb-path', |
99 help='If set, will dump results in JSON format ' | 99 help=('Specify the absolute path of the adb binary that ' |
100 'to specified file.') | 100 'should be used.')) |
101 option_parser.add_option_group(group) | 101 group.add_argument('--json-results-file', dest='json_results_file', |
| 102 help='If set, will dump results in JSON format ' |
| 103 'to the specified file.') |
102 | 104 |
103 | 105 |
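Note: the --debug/--release rewrite above is the core optparse-to-argparse pattern in this CL: argparse's add_mutually_exclusive_group() rejects conflicting flags at parse time, and both flags write to one dest via store_const, which optparse could not enforce declaratively. A minimal standalone sketch of that behavior (flag names mirror the CL; the snippet itself is illustrative):

    import argparse

    parser = argparse.ArgumentParser()
    group = parser.add_mutually_exclusive_group()
    # Both flags share one destination; the group makes passing both an error.
    group.add_argument('--debug', action='store_const', const='Debug',
                       dest='build_type', default='Debug')
    group.add_argument('--release', action='store_const', const='Release',
                       dest='build_type')

    print(parser.parse_args([]).build_type)             # Debug
    print(parser.parse_args(['--release']).build_type)  # Release
    # parser.parse_args(['--debug', '--release']) exits with a usage error.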
104 def ProcessCommonOptions(options, error_func): | 106 def ProcessCommonOptions(args): |
105 """Processes and handles all common options.""" | 107 """Processes and handles all common options.""" |
106 run_tests_helper.SetLogLevel(options.verbose_count) | 108 run_tests_helper.SetLogLevel(args.verbose_count) |
107 constants.SetBuildType(options.build_type) | 109 constants.SetBuildType(args.build_type) |
108 if options.build_directory: | 110 if args.build_directory: |
109 constants.SetBuildDirectory(options.build_directory) | 111 constants.SetBuildDirectory(args.build_directory) |
110 if options.output_directory: | 112 if args.output_directory: |
111 constants.SetOutputDirectory(options.output_directory) | 113 constants.SetOutputDirectory(args.output_directory) |
112 if options.adb_path: | 114 if args.adb_path: |
113 constants.SetAdbPath(options.adb_path) | 115 constants.SetAdbPath(args.adb_path) |
114 # Some things such as Forwarder require ADB to be in the environment path. | 116 # Some things such as Forwarder require ADB to be in the environment path. |
115 adb_dir = os.path.dirname(constants.GetAdbPath()) | 117 adb_dir = os.path.dirname(constants.GetAdbPath()) |
116 if adb_dir and adb_dir not in os.environ['PATH'].split(os.pathsep): | 118 if adb_dir and adb_dir not in os.environ['PATH'].split(os.pathsep): |
117 os.environ['PATH'] = adb_dir + os.pathsep + os.environ['PATH'] | 119 os.environ['PATH'] = adb_dir + os.pathsep + os.environ['PATH'] |
118 if options.environment not in constants.VALID_ENVIRONMENTS: | |
119 error_func('--environment must be one of: %s' % | |
120 ', '.join(constants.VALID_ENVIRONMENTS)) | |
121 | 120 |
122 | 121 |
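Note: ProcessCommonOptions prepends the adb directory to PATH only when it is absent, splitting on os.pathsep so a substring of another entry cannot produce a false match. The same idiom in isolation (the function name is illustrative):

    import os

    def _PrependToPath(directory):
      # os.pathsep is ':' on POSIX and ';' on Windows.
      entries = os.environ['PATH'].split(os.pathsep)
      if directory and directory not in entries:
        os.environ['PATH'] = directory + os.pathsep + os.environ['PATH']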
123 def AddDeviceOptions(option_parser): | 122 def AddDeviceOptions(parser): |
124 group = optparse.OptionGroup(option_parser, 'Device Options') | 123 """Adds device options to |parser|.""" |
125 group.add_option('-c', dest='cleanup_test_files', | 124 group = parser.add_argument_group(title='Device Options') |
126 help='Cleanup test files on the device after run', | 125 group.add_argument('-c', dest='cleanup_test_files', |
127 action='store_true') | 126 help='Cleanup test files on the device after run', |
128 group.add_option('--tool', | 127 action='store_true') |
129 dest='tool', | 128 group.add_argument('--tool', |
130 help=('Run the test under a tool ' | 129 dest='tool', |
131 '(use --tool help to list them)')) | 130 help=('Run the test under a tool ' |
132 group.add_option('-d', '--device', dest='test_device', | 131 '(use --tool help to list them)')) |
133 help=('Target device for the test suite ' | 132 group.add_argument('-d', '--device', dest='test_device', |
134 'to run on.')) | 133 help=('Target device for the test suite ' |
135 option_parser.add_option_group(group) | 134 'to run on.')) |
136 | 135 |
137 | 136 |
138 def AddGTestOptions(option_parser): | 137 def AddGTestOptions(parser): |
139 """Adds gtest options to |option_parser|.""" | 138 """Adds gtest options to |parser|.""" |
140 | 139 |
141 option_parser.usage = '%prog gtest [options]' | 140 gtest_suites = list(gtest_config.STABLE_TEST_SUITES |
142 option_parser.commands_dict = {} | 141 + gtest_config.EXPERIMENTAL_TEST_SUITES) |
143 option_parser.example = '%prog gtest -s base_unittests' | |
144 | 142 |
145 # TODO(gkanwar): Make this option required | 143 group = parser.add_argument_group('GTest Options') |
146 option_parser.add_option('-s', '--suite', dest='suite_name', | 144 group.add_argument('-s', '--suite', dest='suite_name', choices=gtest_suites, |
147 help=('Executable name of the test suite to run ' | 145 nargs='+', metavar='SUITE_NAME', required=True, |
148 '(use -s help to list them).')) | 146 help=('Executable name of the test suite to run.')) |
149 option_parser.add_option('-f', '--gtest_filter', '--gtest-filter', | 147 group.add_argument('-f', '--gtest_filter', '--gtest-filter', |
150 dest='test_filter', | 148 dest='test_filter', |
151 help='googletest-style filter string.') | 149 help='googletest-style filter string.') |
152 option_parser.add_option('--gtest_also_run_disabled_tests', | 150 group.add_argument('--gtest_also_run_disabled_tests', |
153 '--gtest-also-run-disabled-tests', | 151 '--gtest-also-run-disabled-tests', |
154 dest='run_disabled', action='store_true', | 152 dest='run_disabled', action='store_true', |
155 help='Also run disabled tests if applicable.') | 153 help='Also run disabled tests if applicable.') |
156 option_parser.add_option('-a', '--test-arguments', dest='test_arguments', | 154 group.add_argument('-a', '--test-arguments', dest='test_arguments', |
157 default='', | 155 default='', |
158 help='Additional arguments to pass to the test.') | 156 help='Additional arguments to pass to the test.') |
159 option_parser.add_option('-t', dest='timeout', | 157 group.add_argument('-t', dest='timeout', type=int, default=60, |
160 help='Timeout to wait for each test', | 158 help='Timeout to wait for each test ' |
161 type='int', | 159 '(default: %(default)s).') |
162 default=60) | 160 group.add_argument('--isolate_file_path', |
163 option_parser.add_option('--isolate_file_path', | 161 '--isolate-file-path', |
164 '--isolate-file-path', | 162 dest='isolate_file_path', |
165 dest='isolate_file_path', | 163 help='.isolate file path to override the default ' |
166 help='.isolate file path to override the default ' | 164 'path') |
167 'path') | 165 AddDeviceOptions(parser) |
168 | 166 AddCommonOptions(parser) |
169 AddCommonOptions(option_parser) | |
170 AddDeviceOptions(option_parser) | |
171 | 167 |
172 | 168 |
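Note: the new -s handling above shifts work that the deleted ProcessGTestOptions below did by hand into argparse itself: required=True replaces the manual "specify a suite" check, choices replaces the '-s help' listing (a missing or unknown value now fails at parse time with the valid suites printed automatically), and nargs='+' folds the one-suite-to-list conversion into parsing. A small sketch of how the three settings interact, with assumed suite names:

    import argparse

    parser = argparse.ArgumentParser()
    parser.add_argument('-s', '--suite', dest='suite_name', nargs='+',
                        metavar='SUITE_NAME', required=True,
                        choices=['base_unittests', 'net_unittests'])  # assumed

    args = parser.parse_args(['-s', 'base_unittests', 'net_unittests'])
    print(args.suite_name)  # ['base_unittests', 'net_unittests'], always a list
    # Omitting -s, or naming an unknown suite, is a parse-time usage error.

The metavar keeps the usage line readable; without it argparse would repeat the full choices list for each occurrence of the argument.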
173 def AddLinkerTestOptions(option_parser): | 169 def AddLinkerTestOptions(parser): |
174 option_parser.usage = '%prog linker' | 170 group = parser.add_argument_group('Linker Test Options') |
175 option_parser.commands_dict = {} | 171 group.add_argument('-f', '--gtest-filter', dest='test_filter', |
176 option_parser.example = '%prog linker' | 172 help='googletest-style filter string.') |
177 | 173 AddCommonOptions(parser) |
178 option_parser.add_option('-f', '--gtest-filter', dest='test_filter', | 174 AddDeviceOptions(parser) |
179 help='googletest-style filter string.') | |
180 AddCommonOptions(option_parser) | |
181 AddDeviceOptions(option_parser) | |
182 | 175 |
183 | 176 |
184 def ProcessGTestOptions(options): | 177 def AddJavaTestOptions(argument_group): |
185 """Intercept test suite help to list test suites. | |
186 | |
187 Args: | |
188 options: Command line options. | |
189 """ | |
190 if options.suite_name == 'help': | |
191 print 'Available test suites are:' | |
192 for test_suite in (gtest_config.STABLE_TEST_SUITES + | |
193 gtest_config.EXPERIMENTAL_TEST_SUITES): | |
194 print test_suite | |
195 sys.exit(0) | |
196 | |
197 # Convert to a list, assuming all test suites if nothing was specified. | |
198 # TODO(gkanwar): Require having a test suite | |
199 if options.suite_name: | |
200 options.suite_name = [options.suite_name] | |
201 else: | |
202 options.suite_name = [s for s in gtest_config.STABLE_TEST_SUITES] | |
203 | |
204 | |
205 def AddJavaTestOptions(option_parser): | |
206 """Adds the Java test options to |option_parser|.""" | 178 """Adds the Java test options to |option_parser|.""" |
207 | 179 |
208 option_parser.add_option('-f', '--test-filter', dest='test_filter', | 180 argument_group.add_argument( |
209 help=('Test filter (if not fully qualified, ' | 181 '-f', '--test-filter', dest='test_filter', |
210 'will run all matches).')) | 182 help=('Test filter (if not fully qualified, will run all matches).')) |
211 option_parser.add_option( | 183 argument_group.add_argument( |
212 '-A', '--annotation', dest='annotation_str', | 184 '-A', '--annotation', dest='annotation_str', |
213 help=('Comma-separated list of annotations. Run only tests with any of ' | 185 help=('Comma-separated list of annotations. Run only tests with any of ' |
214 'the given annotations. An annotation can be either a key or a ' | 186 'the given annotations. An annotation can be either a key or a ' |
215 'key-values pair. A test that has no annotation is considered ' | 187 'key-values pair. A test that has no annotation is considered ' |
216 '"SmallTest".')) | 188 '"SmallTest".')) |
217 option_parser.add_option( | 189 argument_group.add_argument( |
218 '-E', '--exclude-annotation', dest='exclude_annotation_str', | 190 '-E', '--exclude-annotation', dest='exclude_annotation_str', |
219 help=('Comma-separated list of annotations. Exclude tests with these ' | 191 help=('Comma-separated list of annotations. Exclude tests with these ' |
220 'annotations.')) | 192 'annotations.')) |
221 option_parser.add_option( | 193 argument_group.add_argument( |
222 '--screenshot', dest='screenshot_failures', action='store_true', | 194 '--screenshot', dest='screenshot_failures', action='store_true', |
223 help='Capture screenshots of test failures') | 195 help='Capture screenshots of test failures') |
224 option_parser.add_option( | 196 argument_group.add_argument( |
225 '--save-perf-json', action='store_true', | 197 '--save-perf-json', action='store_true', |
226 help='Saves the JSON file for each UI Perf test.') | 198 help='Saves the JSON file for each UI Perf test.') |
227 option_parser.add_option( | 199 argument_group.add_argument( |
228 '--official-build', action='store_true', help='Run official build tests.') | 200 '--official-build', action='store_true', help='Run official build tests.') |
229 option_parser.add_option( | 201 argument_group.add_argument( |
230 '--test_data', '--test-data', action='append', default=[], | 202 '--test_data', '--test-data', action='append', default=[], |
231 help=('Each instance defines a directory of test data that should be ' | 203 help=('Each instance defines a directory of test data that should be ' |
232 'copied to the target(s) before running the tests. The argument ' | 204 'copied to the target(s) before running the tests. The argument ' |
233 'should be of the form <target>:<source>, <target> is relative to ' | 205 'should be of the form <target>:<source>, <target> is relative to ' |
234 'the device data directory, and <source> is relative to the ' | 206 'the device data directory, and <source> is relative to the ' |
235 'chromium build directory.')) | 207 'chromium build directory.')) |
236 | 208 |
237 | 209 |
238 def ProcessJavaTestOptions(options): | 210 def ProcessJavaTestOptions(args): |
239 """Processes options/arguments and populates |options| with defaults.""" | 211 """Processes options/arguments and populates |options| with defaults.""" |
240 | 212 |
241 if options.annotation_str: | 213 # TODO(jbudorick): Handle most of this function in argparse. |
242 options.annotations = options.annotation_str.split(',') | 214 if args.annotation_str: |
243 elif options.test_filter: | 215 args.annotations = args.annotation_str.split(',') |
244 options.annotations = [] | 216 elif args.test_filter: |
| 217 args.annotations = [] |
245 else: | 218 else: |
246 options.annotations = ['Smoke', 'SmallTest', 'MediumTest', 'LargeTest', | 219 args.annotations = ['Smoke', 'SmallTest', 'MediumTest', 'LargeTest', |
247 'EnormousTest', 'IntegrationTest'] | 220 'EnormousTest', 'IntegrationTest'] |
248 | 221 |
249 if options.exclude_annotation_str: | 222 if args.exclude_annotation_str: |
250 options.exclude_annotations = options.exclude_annotation_str.split(',') | 223 args.exclude_annotations = args.exclude_annotation_str.split(',') |
251 else: | 224 else: |
252 options.exclude_annotations = [] | 225 args.exclude_annotations = [] |
253 | 226 |
254 | 227 |
255 def AddInstrumentationTestOptions(option_parser): | 228 def AddInstrumentationTestOptions(parser): |
256 """Adds Instrumentation test options to |option_parser|.""" | 229 """Adds Instrumentation test options to |parser|.""" |
257 | 230 |
258 option_parser.usage = '%prog instrumentation [options]' | 231 parser.usage = '%(prog)s [options]' |
259 option_parser.commands_dict = {} | 232 |
260 option_parser.example = ('%prog instrumentation ' | 233 group = parser.add_argument_group('Instrumentation Test Options') |
261 '--test-apk=ChromeShellTest') | 234 AddJavaTestOptions(group) |
262 | 235 |
263 AddJavaTestOptions(option_parser) | 236 java_or_python_group = group.add_mutually_exclusive_group() |
264 AddCommonOptions(option_parser) | 237 java_or_python_group.add_argument( |
265 AddDeviceOptions(option_parser) | 238 '-j', '--java-only', action='store_false', |
266 | 239 dest='run_python_tests', default=True, help='Run only the Java tests.') |
267 option_parser.add_option('-j', '--java-only', action='store_true', | 240 java_or_python_group.add_argument( |
268 default=False, help='Run only the Java tests.') | 241 '-p', '--python-only', action='store_false', |
269 option_parser.add_option('-p', '--python-only', action='store_true', | 242 dest='run_java_tests', default=True, |
270 default=False, | 243 help='Run only the host-driven tests.') |
271 help='Run only the host-driven tests.') | 244 |
272 option_parser.add_option('--host-driven-root', | 245 group.add_argument('--host-driven-root', |
273 help='Root of the host-driven tests.') | 246 help='Root of the host-driven tests.') |
274 option_parser.add_option('-w', '--wait_debugger', dest='wait_for_debugger', | 247 group.add_argument('-w', '--wait_debugger', dest='wait_for_debugger', |
275 action='store_true', | 248 action='store_true', |
276 help='Wait for debugger.') | 249 help='Wait for debugger.') |
277 option_parser.add_option( | 250 group.add_argument('--test-apk', dest='test_apk', required=True, |
278 '--test-apk', dest='test_apk', | 251 help=('The name of the apk containing the tests ' |
279 help=('The name of the apk containing the tests ' | 252 '(without the .apk extension; ' |
280 '(without the .apk extension; e.g. "ContentShellTest").')) | 253 'e.g. "ContentShellTest").')) |
281 option_parser.add_option('--coverage-dir', | 254 group.add_argument('--coverage-dir', |
282 help=('Directory in which to place all generated ' | 255 help=('Directory in which to place all generated ' |
283 'EMMA coverage files.')) | 256 'EMMA coverage files.')) |
284 option_parser.add_option('--device-flags', dest='device_flags', default='', | 257 group.add_argument('--device-flags', dest='device_flags', default='', |
285 help='The relative filepath to a file containing ' | 258 help='The relative filepath to a file containing ' |
286 'command-line flags to set on the device') | 259 'command-line flags to set on the device') |
287 option_parser.add_option('--isolate_file_path', | 260 group.add_argument('--isolate_file_path', |
288 '--isolate-file-path', | 261 '--isolate-file-path', |
289 dest='isolate_file_path', | 262 dest='isolate_file_path', |
290 help='.isolate file path to override the default ' | 263 help='.isolate file path to override the default ' |
291 'path') | 264 'path') |
292 | 265 |
293 | 266 AddCommonOptions(parser) |
294 def ProcessInstrumentationOptions(options, error_func): | 267 AddDeviceOptions(parser) |
| 268 |
| 269 |
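Note: -j/--java-only and -p/--python-only above use an inverted store_false pattern: each flag clears the *other* suite's run bit, so both default to running, and the mutually exclusive group replaces the hand-rolled conflict check deleted from ProcessInstrumentationOptions below. A minimal sketch:

    import argparse

    parser = argparse.ArgumentParser()
    group = parser.add_mutually_exclusive_group()
    # '--java-only' means "skip the Python tests", hence the inverted dest.
    group.add_argument('-j', '--java-only', action='store_false',
                       dest='run_python_tests', default=True)
    group.add_argument('-p', '--python-only', action='store_false',
                       dest='run_java_tests', default=True)

    print(parser.parse_args(['-j']).run_python_tests)  # False; Java still runs
    # parser.parse_args(['-j', '-p']) exits with a usage error.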
| 270 def ProcessInstrumentationOptions(args): |
295 """Processes options/arguments and populate |options| with defaults. | 271 """Processes options/arguments and populate |options| with defaults. |
296 | 272 |
297 Args: | 273 Args: |
298 options: optparse.Options object. | 274 args: argparse.Namespace object. |
299 error_func: Function to call with the error message in case of an error. | |
300 | 275 |
301 Returns: | 276 Returns: |
302 An InstrumentationOptions named tuple which contains all options relevant to | 277 An InstrumentationOptions named tuple which contains all options relevant to |
303 instrumentation tests. | 278 instrumentation tests. |
304 """ | 279 """ |
305 | 280 |
306 ProcessJavaTestOptions(options) | 281 ProcessJavaTestOptions(args) |
307 | 282 |
308 if options.java_only and options.python_only: | 283 if not args.host_driven_root: |
309 error_func('Options java_only (-j) and python_only (-p) ' | 284 args.run_python_tests = False |
310 'are mutually exclusive.') | 285 |
311 options.run_java_tests = True | 286 args.test_apk_path = os.path.join( |
312 options.run_python_tests = True | |
313 if options.java_only: | |
314 options.run_python_tests = False | |
315 elif options.python_only: | |
316 options.run_java_tests = False | |
317 | |
318 if not options.host_driven_root: | |
319 options.run_python_tests = False | |
320 | |
321 if not options.test_apk: | |
322 error_func('--test-apk must be specified.') | |
323 | |
324 | |
325 options.test_apk_path = os.path.join( | |
326 constants.GetOutDirectory(), | 287 constants.GetOutDirectory(), |
327 constants.SDK_BUILD_APKS_DIR, | 288 constants.SDK_BUILD_APKS_DIR, |
328 '%s.apk' % options.test_apk) | 289 '%s.apk' % args.test_apk) |
329 options.test_apk_jar_path = os.path.join( | 290 args.test_apk_jar_path = os.path.join( |
330 constants.GetOutDirectory(), | 291 constants.GetOutDirectory(), |
331 constants.SDK_BUILD_TEST_JAVALIB_DIR, | 292 constants.SDK_BUILD_TEST_JAVALIB_DIR, |
332 '%s.jar' % options.test_apk) | 293 '%s.jar' % args.test_apk) |
333 options.test_support_apk_path = '%sSupport%s' % ( | 294 args.test_support_apk_path = '%sSupport%s' % ( |
334 os.path.splitext(options.test_apk_path)) | 295 os.path.splitext(args.test_apk_path)) |
335 | 296 |
336 options.test_runner = apk_helper.GetInstrumentationName(options.test_apk_path) | 297 args.test_runner = apk_helper.GetInstrumentationName(args.test_apk_path) |
337 | 298 |
| 299 # TODO(jbudorick): Get rid of InstrumentationOptions. |
338 return instrumentation_test_options.InstrumentationOptions( | 300 return instrumentation_test_options.InstrumentationOptions( |
339 options.tool, | 301 args.tool, |
340 options.cleanup_test_files, | 302 args.cleanup_test_files, |
341 options.annotations, | 303 args.annotations, |
342 options.exclude_annotations, | 304 args.exclude_annotations, |
343 options.test_filter, | 305 args.test_filter, |
344 options.test_data, | 306 args.test_data, |
345 options.save_perf_json, | 307 args.save_perf_json, |
346 options.screenshot_failures, | 308 args.screenshot_failures, |
347 options.wait_for_debugger, | 309 args.wait_for_debugger, |
348 options.coverage_dir, | 310 args.coverage_dir, |
349 options.test_apk, | 311 args.test_apk, |
350 options.test_apk_path, | 312 args.test_apk_path, |
351 options.test_apk_jar_path, | 313 args.test_apk_jar_path, |
352 options.test_runner, | 314 args.test_runner, |
353 options.test_support_apk_path, | 315 args.test_support_apk_path, |
354 options.device_flags, | 316 args.device_flags, |
355 options.isolate_file_path | 317 args.isolate_file_path |
356 ) | 318 ) |
357 | 319 |
358 | 320 |
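Note: the test_support_apk_path line in the hunk above leans on a compact idiom: os.path.splitext() returns a (root, extension) 2-tuple, which fills both placeholders of the '%sSupport%s' template and splices 'Support' in ahead of the extension. In isolation (the path is an assumed example):

    import os

    test_apk_path = 'out/Debug/apks/ContentShellTest.apk'  # assumed example
    support_apk_path = '%sSupport%s' % os.path.splitext(test_apk_path)
    print(support_apk_path)  # out/Debug/apks/ContentShellTestSupport.apk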
359 def AddUIAutomatorTestOptions(option_parser): | 321 def AddUIAutomatorTestOptions(parser): |
360 """Adds UI Automator test options to |option_parser|.""" | 322 """Adds UI Automator test options to |parser|.""" |
361 | 323 |
362 option_parser.usage = '%prog uiautomator [options]' | 324 group = parser.add_argument_group('UIAutomator Test Options') |
363 option_parser.commands_dict = {} | 325 AddJavaTestOptions(group) |
364 option_parser.example = ( | 326 group.add_argument( |
365 '%prog uiautomator --test-jar=chrome_shell_uiautomator_tests' | 327 '--package', required=True, choices=constants.PACKAGE_INFO.keys(), |
366 ' --package=chrome_shell') | 328 metavar='PACKAGE', help='Package under test.') |
367 option_parser.add_option( | 329 group.add_argument( |
368 '--package', | 330 '--test-jar', dest='test_jar', required=True, |
369 help=('Package under test. Possible values: %s' % | |
370 constants.PACKAGE_INFO.keys())) | |
371 option_parser.add_option( | |
372 '--test-jar', dest='test_jar', | |
373 help=('The name of the dexed jar containing the tests (without the ' | 331 help=('The name of the dexed jar containing the tests (without the ' |
374 '.dex.jar extension). Alternatively, this can be a full path ' | 332 '.dex.jar extension). Alternatively, this can be a full path ' |
375 'to the jar.')) | 333 'to the jar.')) |
376 | 334 |
377 AddJavaTestOptions(option_parser) | 335 AddCommonOptions(parser) |
378 AddCommonOptions(option_parser) | 336 AddDeviceOptions(parser) |
379 AddDeviceOptions(option_parser) | 337 |
380 | 338 |
381 | 339 def ProcessUIAutomatorOptions(args): |
382 def ProcessUIAutomatorOptions(options, error_func): | |
383 """Processes UIAutomator options/arguments. | 340 """Processes UIAutomator options/arguments. |
384 | 341 |
385 Args: | 342 Args: |
386 options: optparse.Options object. | 343 args: argparse.Namespace object. |
387 error_func: Function to call with the error message in case of an error. | |
388 | 344 |
389 Returns: | 345 Returns: |
390 A UIAutomatorOptions named tuple which contains all options relevant to | 346 A UIAutomatorOptions named tuple which contains all options relevant to |
391 uiautomator tests. | 347 uiautomator tests. |
392 """ | 348 """ |
393 | 349 |
394 ProcessJavaTestOptions(options) | 350 ProcessJavaTestOptions(args) |
395 | 351 |
396 if not options.package: | 352 if os.path.exists(args.test_jar): |
397 error_func('--package is required.') | |
398 | |
399 if options.package not in constants.PACKAGE_INFO: | |
400 error_func('Invalid package.') | |
401 | |
402 if not options.test_jar: | |
403 error_func('--test-jar must be specified.') | |
404 | |
405 if os.path.exists(options.test_jar): | |
406 # The dexed JAR is fully qualified, assume the info JAR lives alongside. | 353 # The dexed JAR is fully qualified, assume the info JAR lives alongside. |
407 options.uiautomator_jar = options.test_jar | 354 args.uiautomator_jar = args.test_jar |
408 else: | 355 else: |
409 options.uiautomator_jar = os.path.join( | 356 args.uiautomator_jar = os.path.join( |
410 constants.GetOutDirectory(), | 357 constants.GetOutDirectory(), |
411 constants.SDK_BUILD_JAVALIB_DIR, | 358 constants.SDK_BUILD_JAVALIB_DIR, |
412 '%s.dex.jar' % options.test_jar) | 359 '%s.dex.jar' % args.test_jar) |
413 options.uiautomator_info_jar = ( | 360 args.uiautomator_info_jar = ( |
414 options.uiautomator_jar[:options.uiautomator_jar.find('.dex.jar')] + | 361 args.uiautomator_jar[:args.uiautomator_jar.find('.dex.jar')] + |
415 '_java.jar') | 362 '_java.jar') |
416 | 363 |
417 return uiautomator_test_options.UIAutomatorOptions( | 364 return uiautomator_test_options.UIAutomatorOptions( |
418 options.tool, | 365 args.tool, |
419 options.cleanup_test_files, | 366 args.cleanup_test_files, |
420 options.annotations, | 367 args.annotations, |
421 options.exclude_annotations, | 368 args.exclude_annotations, |
422 options.test_filter, | 369 args.test_filter, |
423 options.test_data, | 370 args.test_data, |
424 options.save_perf_json, | 371 args.save_perf_json, |
425 options.screenshot_failures, | 372 args.screenshot_failures, |
426 options.uiautomator_jar, | 373 args.uiautomator_jar, |
427 options.uiautomator_info_jar, | 374 args.uiautomator_info_jar, |
428 options.package) | 375 args.package) |
429 | 376 |
430 | 377 |
431 def AddJUnitTestOptions(option_parser): | 378 def AddJUnitTestOptions(parser): |
432 """Adds junit test options to |option_parser|.""" | 379 """Adds junit test options to |parser|.""" |
433 option_parser.usage = '%prog junit -s [test suite name]' | 380 |
434 option_parser.commands_dict = {} | 381 group = parser.add_argument_group('JUnit Test Options') |
435 | 382 group.add_argument( |
436 option_parser.add_option( | 383 '-s', '--test-suite', dest='test_suite', required=True, |
437 '-s', '--test-suite', dest='test_suite', | |
438 help=('JUnit test suite to run.')) | 384 help=('JUnit test suite to run.')) |
439 option_parser.add_option( | 385 group.add_argument( |
440 '-f', '--test-filter', dest='test_filter', | 386 '-f', '--test-filter', dest='test_filter', |
441 help='Filters tests googletest-style.') | 387 help='Filters tests googletest-style.') |
442 option_parser.add_option( | 388 group.add_argument( |
443 '--package-filter', dest='package_filter', | 389 '--package-filter', dest='package_filter', |
444 help='Filters tests by package.') | 390 help='Filters tests by package.') |
445 option_parser.add_option( | 391 group.add_argument( |
446 '--runner-filter', dest='runner_filter', | 392 '--runner-filter', dest='runner_filter', |
447 help='Filters tests by runner class. Must be fully qualified.') | 393 help='Filters tests by runner class. Must be fully qualified.') |
448 option_parser.add_option( | 394 group.add_argument( |
449 '--sdk-version', dest='sdk_version', type="int", | 395 '--sdk-version', dest='sdk_version', type=int, |
450 help='The Android SDK version.') | 396 help='The Android SDK version.') |
451 AddCommonOptions(option_parser) | 397 AddCommonOptions(parser) |
452 | 398 |
453 | 399 |
454 def ProcessJUnitTestOptions(options, error_func): | 400 def AddMonkeyTestOptions(parser): |
455 """Processes all JUnit test options.""" | 401 """Adds monkey test options to |parser|.""" |
456 if not options.test_suite: | 402 |
457 error_func('No test suite specified.') | 403 group = parser.add_argument_group('Monkey Test Options') |
458 return options | 404 group.add_argument( |
459 | 405 '--package', required=True, choices=constants.PACKAGE_INFO.keys(), |
460 | 406 metavar='PACKAGE', help='Package under test.') |
461 def AddMonkeyTestOptions(option_parser): | 407 group.add_argument( |
462 """Adds monkey test options to |option_parser|.""" | 408 '--event-count', default=10000, type=int, |
463 | 409 help='Number of events to generate (default: %(default)s).') |
464 option_parser.usage = '%prog monkey [options]' | 410 group.add_argument( |
465 option_parser.commands_dict = {} | |
466 option_parser.example = ( | |
467 '%prog monkey --package=chrome_shell') | |
468 | |
469 option_parser.add_option( | |
470 '--package', | |
471 help=('Package under test. Possible values: %s' % | |
472 constants.PACKAGE_INFO.keys())) | |
473 option_parser.add_option( | |
474 '--event-count', default=10000, type='int', | |
475 help='Number of events to generate [default: %default].') | |
476 option_parser.add_option( | |
477 '--category', default='', | 411 '--category', default='', |
478 help='A list of allowed categories.') | 412 help='A list of allowed categories.') |
479 option_parser.add_option( | 413 group.add_argument( |
480 '--throttle', default=100, type='int', | 414 '--throttle', default=100, type=int, |
481 help='Delay between events (ms) [default: %default]. ') | 415 help='Delay between events (ms) (default: %(default)s). ') |
482 option_parser.add_option( | 416 group.add_argument( |
483 '--seed', type='int', | 417 '--seed', type=int, |
484 help=('Seed value for pseudo-random generator. Same seed value generates ' | 418 help=('Seed value for pseudo-random generator. Same seed value generates ' |
485 'the same sequence of events. Seed is randomized by default.')) | 419 'the same sequence of events. Seed is randomized by default.')) |
486 option_parser.add_option( | 420 group.add_argument( |
487 '--extra-args', default='', | 421 '--extra-args', default='', |
488 help=('String of other args to pass to the command verbatim ' | 422 help=('String of other args to pass to the command verbatim.')) |
489 '[default: "%default"].')) | 423 |
490 | 424 AddCommonOptions(parser) |
491 AddCommonOptions(option_parser) | 425 AddDeviceOptions(parser) |
492 AddDeviceOptions(option_parser) | 426 |
493 | 427 |
494 | 428 def ProcessMonkeyTestOptions(args): |
495 def ProcessMonkeyTestOptions(options, error_func): | |
496 """Processes all monkey test options. | 429 """Processes all monkey test options. |
497 | 430 |
498 Args: | 431 Args: |
499 options: optparse.Options object. | 432 args: argparse.Namespace object. |
500 error_func: Function to call with the error message in case of an error. | |
501 | 433 |
502 Returns: | 434 Returns: |
503 A MonkeyOptions named tuple which contains all options relevant to | 435 A MonkeyOptions named tuple which contains all options relevant to |
504 monkey tests. | 436 monkey tests. |
505 """ | 437 """ |
506 if not options.package: | 438 # TODO(jbudorick): Handle this directly in argparse with nargs='+' |
507 error_func('--package is required.') | 439 category = args.category |
508 | |
509 if options.package not in constants.PACKAGE_INFO: | |
510 error_func('Invalid package.') | |
511 | |
512 category = options.category | |
513 if category: | 440 if category: |
514 category = options.category.split(',') | 441 category = args.category.split(',') |
515 | 442 |
| 443 # TODO(jbudorick): Get rid of MonkeyOptions. |
516 return monkey_test_options.MonkeyOptions( | 444 return monkey_test_options.MonkeyOptions( |
517 options.verbose_count, | 445 args.verbose_count, |
518 options.package, | 446 args.package, |
519 options.event_count, | 447 args.event_count, |
520 category, | 448 category, |
521 options.throttle, | 449 args.throttle, |
522 options.seed, | 450 args.seed, |
523 options.extra_args) | 451 args.extra_args) |
524 | 452 |
525 | 453 |
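Note: the TODO above about handling --category directly in argparse would move the comma-splitting out of ProcessMonkeyTestOptions. One hedged way to do that (an assumed alternative, not what the CL does) is a type callback, since nargs='+' would instead require space-separated values:

    import argparse

    parser = argparse.ArgumentParser()
    # type= runs on the raw string at parse time; the [] default is untouched.
    parser.add_argument('--category', default=[],
                        type=lambda value: value.split(','))

    print(parser.parse_args(['--category', 'a,b']).category)  # ['a', 'b']
    print(parser.parse_args([]).category)                     # []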
526 def AddPerfTestOptions(option_parser): | 454 def AddPerfTestOptions(parser): |
527 """Adds perf test options to |option_parser|.""" | 455 """Adds perf test options to |parser|.""" |
528 | 456 |
529 option_parser.usage = '%prog perf [options]' | 457 group = parser.add_argument_group('Perf Test Options') |
530 option_parser.commands_dict = {} | 458 |
531 option_parser.example = ('%prog perf ' | 459 class SingleStepAction(argparse.Action): |
532 '[--single-step -- command args] or ' | 460 def __call__(self, parser, namespace, values, option_string=None): |
533 '[--steps perf_steps.json] or ' | 461 if values and not namespace.single_step: |
534 '[--print-step step]') | 462 parser.error('single step command provided, ' |
535 | 463 'but --single-step not specified.') |
536 option_parser.add_option( | 464 elif namespace.single_step and not values: |
537 '--single-step', | 465 parser.error('--single-step specified, ' |
538 action='store_true', | 466 'but no single step command provided.') |
| 467 setattr(namespace, self.dest, values) |
| 468 |
| 469 step_group = group.add_mutually_exclusive_group(required=True) |
| 470 # TODO(jbudorick): Revise --single-step to use argparse.REMAINDER. |
| 471 # This requires removing "--" from client calls. |
| 472 step_group.add_argument( |
| 473 '--single-step', action='store_true', |
539 help='Execute the given command with retries, but only print the result ' | 474 help='Execute the given command with retries, but only print the result ' |
540 'for the "most successful" round.') | 475 'for the "most successful" round.') |
541 option_parser.add_option( | 476 step_group.add_argument( |
542 '--steps', | 477 '--steps', |
543 help='JSON file containing the list of commands to run.') | 478 help='JSON file containing the list of commands to run.') |
544 option_parser.add_option( | 479 step_group.add_argument( |
| 480 '--print-step', |
| 481 help='The name of a previously executed perf step to print.') |
| 482 |
| 483 group.add_argument( |
| 484 '--output-json-list', |
| 485 help='Write a simple list of names from --steps into the given file.') |
| 486 group.add_argument( |
| 487 '--collect-chartjson-data', |
| 488 action='store_true', |
| 489 help='Cache the chartjson output from each step for later use.') |
| 490 group.add_argument( |
| 491 '--output-chartjson-data', |
| 492 default='', |
| 493 help='Write out chartjson into the given file.') |
| 494 group.add_argument( |
545 '--flaky-steps', | 495 '--flaky-steps', |
546 help=('A JSON file containing steps that are flaky ' | 496 help=('A JSON file containing steps that are flaky ' |
547 'and will have their exit codes ignored.')) | 497 'and will have their exit codes ignored.')) |
548 option_parser.add_option( | 498 group.add_argument( |
549 '--output-json-list', | |
550 help='Write a simple list of names from --steps into the given file.') | |
551 option_parser.add_option( | |
552 '--collect-chartjson-data', | |
553 action='store_true', | |
554 help='Cache the chartjson output from each step for later use.') | |
555 option_parser.add_option( | |
556 '--output-chartjson-data', | |
557 default='', | |
558 help='Write out chartjson into the given file.') | |
559 option_parser.add_option( | |
560 '--print-step', | |
561 help='The name of a previously executed perf step to print.') | |
562 option_parser.add_option( | |
563 '--no-timeout', action='store_true', | 499 '--no-timeout', action='store_true', |
564 help=('Do not impose a timeout. Each perf step is responsible for ' | 500 help=('Do not impose a timeout. Each perf step is responsible for ' |
565 'implementing the timeout logic.')) | 501 'implementing the timeout logic.')) |
566 option_parser.add_option( | 502 group.add_argument( |
567 '-f', '--test-filter', | 503 '-f', '--test-filter', |
568 help=('Test filter (will match against the names listed in --steps).')) | 504 help=('Test filter (will match against the names listed in --steps).')) |
569 option_parser.add_option( | 505 group.add_argument( |
570 '--dry-run', | 506 '--dry-run', action='store_true', |
571 action='store_true', | |
572 help='Just print the steps without executing.') | 507 help='Just print the steps without executing.') |
573 AddCommonOptions(option_parser) | 508 group.add_argument('single_step_command', nargs='*', action=SingleStepAction, |
574 AddDeviceOptions(option_parser) | 509 help='If --single-step is specified, the command to run.') |
575 | 510 AddCommonOptions(parser) |
576 | 511 AddDeviceOptions(parser) |
577 def ProcessPerfTestOptions(options, args, error_func): | 512 |
| 513 |
| 514 def ProcessPerfTestOptions(args): |
578 """Processes all perf test options. | 515 """Processes all perf test options. |
579 | 516 |
580 Args: | 517 Args: |
581 options: optparse.Options object. | 518 args: argparse.Namespace object. |
582 error_func: Function to call with the error message in case of an error. | |
583 | 519 |
584 Returns: | 520 Returns: |
585 A PerfOptions named tuple which contains all options relevant to | 521 A PerfOptions named tuple which contains all options relevant to |
586 perf tests. | 522 perf tests. |
587 """ | 523 """ |
588 # Only one of steps, print_step or single_step must be provided. | 524 # TODO(jbudorick): Move single_step handling down into the perf tests. |
589 count = len(filter(None, | 525 if args.single_step: |
590 [options.steps, options.print_step, options.single_step])) | 526 args.single_step = ' '.join(args.single_step_command) |
591 if count != 1: | 527 # TODO(jbudorick): Get rid of PerfOptions. |
592 error_func('Please specify one of: --steps, --print-step, --single-step.') | |
593 single_step = None | |
594 if options.single_step: | |
595 single_step = ' '.join(args[2:]) | |
596 return perf_test_options.PerfOptions( | 528 return perf_test_options.PerfOptions( |
597 options.steps, options.flaky_steps, options.output_json_list, | 529 args.steps, args.flaky_steps, args.output_json_list, |
598 options.print_step, options.no_timeout, options.test_filter, | 530 args.print_step, args.no_timeout, args.test_filter, |
599 options.dry_run, single_step, options.collect_chartjson_data, | 531 args.dry_run, args.single_step, args.collect_chartjson_data, |
600 options.output_chartjson_data) | 532 args.output_chartjson_data) |
601 | 533 |
602 | 534 |
603 def AddPythonTestOptions(option_parser): | 535 def AddPythonTestOptions(parser): |
604 option_parser.add_option('-s', '--suite', dest='suite_name', | 536 group = parser.add_argument_group('Python Test Options') |
605 help=('Name of the test suite to run' | 537 group.add_argument( |
606 '(use -s help to list them).')) | 538 '-s', '--suite', dest='suite_name', metavar='SUITE_NAME', |
607 AddCommonOptions(option_parser) | 539 choices=constants.PYTHON_UNIT_TEST_SUITES.keys(), |
608 | 540 help='Name of the test suite to run.') |
609 | 541 AddCommonOptions(parser) |
610 def ProcessPythonTestOptions(options, error_func): | 542 |
611 if options.suite_name not in constants.PYTHON_UNIT_TEST_SUITES: | 543 |
612 available = ('Available test suites: [%s]' % | 544 def _RunGTests(args, devices): |
613 ', '.join(constants.PYTHON_UNIT_TEST_SUITES.iterkeys())) | |
614 if options.suite_name == 'help': | |
615 print available | |
616 else: | |
617 error_func('"%s" is not a valid suite. %s' % | |
618 (options.suite_name, available)) | |
619 | |
620 | |
621 def _RunGTests(options, devices): | |
622 """Subcommand of RunTestsCommands which runs gtests.""" | 545 """Subcommand of RunTestsCommands which runs gtests.""" |
623 ProcessGTestOptions(options) | |
624 | |
625 exit_code = 0 | 546 exit_code = 0 |
626 for suite_name in options.suite_name: | 547 for suite_name in args.suite_name: |
627 # TODO(gkanwar): Move this into ProcessGTestOptions once we require -s for | 548 # TODO(jbudorick): Either deprecate multi-suite or move its handling down |
628 # the gtest command. | 549 # into the gtest code. |
629 gtest_options = gtest_test_options.GTestOptions( | 550 gtest_options = gtest_test_options.GTestOptions( |
630 options.tool, | 551 args.tool, |
631 options.cleanup_test_files, | 552 args.cleanup_test_files, |
632 options.test_filter, | 553 args.test_filter, |
633 options.run_disabled, | 554 args.run_disabled, |
634 options.test_arguments, | 555 args.test_arguments, |
635 options.timeout, | 556 args.timeout, |
636 options.isolate_file_path, | 557 args.isolate_file_path, |
637 suite_name) | 558 suite_name) |
638 runner_factory, tests = gtest_setup.Setup(gtest_options, devices) | 559 runner_factory, tests = gtest_setup.Setup(gtest_options, devices) |
639 | 560 |
640 results, test_exit_code = test_dispatcher.RunTests( | 561 results, test_exit_code = test_dispatcher.RunTests( |
641 tests, runner_factory, devices, shard=True, test_timeout=None, | 562 tests, runner_factory, devices, shard=True, test_timeout=None, |
642 num_retries=options.num_retries) | 563 num_retries=args.num_retries) |
643 | 564 |
644 if test_exit_code and exit_code != constants.ERROR_EXIT_CODE: | 565 if test_exit_code and exit_code != constants.ERROR_EXIT_CODE: |
645 exit_code = test_exit_code | 566 exit_code = test_exit_code |
646 | 567 |
647 report_results.LogFull( | 568 report_results.LogFull( |
648 results=results, | 569 results=results, |
649 test_type='Unit test', | 570 test_type='Unit test', |
650 test_package=suite_name, | 571 test_package=suite_name, |
651 flakiness_server=options.flakiness_dashboard_server) | 572 flakiness_server=args.flakiness_dashboard_server) |
652 | 573 |
653 if options.json_results_file: | 574 if args.json_results_file: |
654 json_results.GenerateJsonResultsFile(results, options.json_results_file) | 575 json_results.GenerateJsonResultsFile(results, args.json_results_file) |
655 | 576 |
656 if os.path.isdir(constants.ISOLATE_DEPS_DIR): | 577 if os.path.isdir(constants.ISOLATE_DEPS_DIR): |
657 shutil.rmtree(constants.ISOLATE_DEPS_DIR) | 578 shutil.rmtree(constants.ISOLATE_DEPS_DIR) |
658 | 579 |
659 return exit_code | 580 return exit_code |
660 | 581 |
661 | 582 |
662 def _RunLinkerTests(options, devices): | 583 def _RunLinkerTests(args, devices): |
663 """Subcommand of RunTestsCommands which runs linker tests.""" | 584 """Subcommand of RunTestsCommands which runs linker tests.""" |
664 runner_factory, tests = linker_setup.Setup(options, devices) | 585 runner_factory, tests = linker_setup.Setup(args, devices) |
665 | 586 |
666 results, exit_code = test_dispatcher.RunTests( | 587 results, exit_code = test_dispatcher.RunTests( |
667 tests, runner_factory, devices, shard=True, test_timeout=60, | 588 tests, runner_factory, devices, shard=True, test_timeout=60, |
668 num_retries=options.num_retries) | 589 num_retries=args.num_retries) |
669 | 590 |
670 report_results.LogFull( | 591 report_results.LogFull( |
671 results=results, | 592 results=results, |
672 test_type='Linker test', | 593 test_type='Linker test', |
673 test_package='ChromiumLinkerTest') | 594 test_package='ChromiumLinkerTest') |
674 | 595 |
675 if options.json_results_file: | 596 if args.json_results_file: |
676 json_results.GenerateJsonResultsFile(results, options.json_results_file) | 597 json_results.GenerateJsonResultsFile(results, args.json_results_file) |
677 | 598 |
678 return exit_code | 599 return exit_code |
679 | 600 |
680 | 601 |
681 def _RunInstrumentationTests(options, error_func, devices): | 602 def _RunInstrumentationTests(args, devices): |
682 """Subcommand of RunTestsCommands which runs instrumentation tests.""" | 603 """Subcommand of RunTestsCommands which runs instrumentation tests.""" |
683 instrumentation_options = ProcessInstrumentationOptions(options, error_func) | 604 logging.info('_RunInstrumentationTests(%s, %s)' % (str(args), str(devices))) |
684 | 605 |
685 if len(devices) > 1 and options.wait_for_debugger: | 606 instrumentation_options = ProcessInstrumentationOptions(args) |
| 607 |
| 608 if len(devices) > 1 and args.wait_for_debugger: |
686 logging.warning('Debugger can not be sharded, using first available device') | 609 logging.warning('Debugger can not be sharded, using first available device') |
687 devices = devices[:1] | 610 devices = devices[:1] |
688 | 611 |
689 results = base_test_result.TestRunResults() | 612 results = base_test_result.TestRunResults() |
690 exit_code = 0 | 613 exit_code = 0 |
691 | 614 |
692 if options.run_java_tests: | 615 if args.run_java_tests: |
693 runner_factory, tests = instrumentation_setup.Setup( | 616 runner_factory, tests = instrumentation_setup.Setup( |
694 instrumentation_options, devices) | 617 instrumentation_options, devices) |
695 | 618 |
696 test_results, exit_code = test_dispatcher.RunTests( | 619 test_results, exit_code = test_dispatcher.RunTests( |
697 tests, runner_factory, devices, shard=True, test_timeout=None, | 620 tests, runner_factory, devices, shard=True, test_timeout=None, |
698 num_retries=options.num_retries) | 621 num_retries=args.num_retries) |
699 | 622 |
700 results.AddTestRunResults(test_results) | 623 results.AddTestRunResults(test_results) |
701 | 624 |
702 if options.run_python_tests: | 625 if args.run_python_tests: |
703 runner_factory, tests = host_driven_setup.InstrumentationSetup( | 626 runner_factory, tests = host_driven_setup.InstrumentationSetup( |
704 options.host_driven_root, options.official_build, | 627 args.host_driven_root, args.official_build, |
705 instrumentation_options) | 628 instrumentation_options) |
706 | 629 |
707 if tests: | 630 if tests: |
708 test_results, test_exit_code = test_dispatcher.RunTests( | 631 test_results, test_exit_code = test_dispatcher.RunTests( |
709 tests, runner_factory, devices, shard=True, test_timeout=None, | 632 tests, runner_factory, devices, shard=True, test_timeout=None, |
710 num_retries=options.num_retries) | 633 num_retries=args.num_retries) |
711 | 634 |
712 results.AddTestRunResults(test_results) | 635 results.AddTestRunResults(test_results) |
713 | 636 |
714 # Only allow exit code escalation | 637 # Only allow exit code escalation |
715 if test_exit_code and exit_code != constants.ERROR_EXIT_CODE: | 638 if test_exit_code and exit_code != constants.ERROR_EXIT_CODE: |
716 exit_code = test_exit_code | 639 exit_code = test_exit_code |
717 | 640 |
718 if options.device_flags: | 641 if args.device_flags: |
719 options.device_flags = os.path.join(constants.DIR_SOURCE_ROOT, | 642 args.device_flags = os.path.join(constants.DIR_SOURCE_ROOT, |
720 options.device_flags) | 643 args.device_flags) |
721 | 644 |
722 report_results.LogFull( | 645 report_results.LogFull( |
723 results=results, | 646 results=results, |
724 test_type='Instrumentation', | 647 test_type='Instrumentation', |
725 test_package=os.path.basename(options.test_apk), | 648 test_package=os.path.basename(args.test_apk), |
726 annotation=options.annotations, | 649 annotation=args.annotations, |
727 flakiness_server=options.flakiness_dashboard_server) | 650 flakiness_server=args.flakiness_dashboard_server) |
728 | 651 |
729 if options.json_results_file: | 652 if args.json_results_file: |
730 json_results.GenerateJsonResultsFile(results, options.json_results_file) | 653 json_results.GenerateJsonResultsFile(results, args.json_results_file) |
731 | 654 |
732 return exit_code | 655 return exit_code |
733 | 656 |
734 | 657 |
735 def _RunUIAutomatorTests(options, error_func, devices): | 658 def _RunUIAutomatorTests(args, devices): |
736 """Subcommand of RunTestsCommands which runs uiautomator tests.""" | 659 """Subcommand of RunTestsCommands which runs uiautomator tests.""" |
737 uiautomator_options = ProcessUIAutomatorOptions(options, error_func) | 660 uiautomator_options = ProcessUIAutomatorOptions(args) |
738 | 661 |
739 runner_factory, tests = uiautomator_setup.Setup(uiautomator_options) | 662 runner_factory, tests = uiautomator_setup.Setup(uiautomator_options) |
740 | 663 |
741 results, exit_code = test_dispatcher.RunTests( | 664 results, exit_code = test_dispatcher.RunTests( |
742 tests, runner_factory, devices, shard=True, test_timeout=None, | 665 tests, runner_factory, devices, shard=True, test_timeout=None, |
743 num_retries=options.num_retries) | 666 num_retries=args.num_retries) |
744 | 667 |
745 report_results.LogFull( | 668 report_results.LogFull( |
746 results=results, | 669 results=results, |
747 test_type='UIAutomator', | 670 test_type='UIAutomator', |
748 test_package=os.path.basename(options.test_jar), | 671 test_package=os.path.basename(args.test_jar), |
749 annotation=options.annotations, | 672 annotation=args.annotations, |
750 flakiness_server=options.flakiness_dashboard_server) | 673 flakiness_server=args.flakiness_dashboard_server) |
751 | 674 |
752 if options.json_results_file: | 675 if args.json_results_file: |
753 json_results.GenerateJsonResultsFile(results, options.json_results_file) | 676 json_results.GenerateJsonResultsFile(results, args.json_results_file) |
754 | 677 |
755 return exit_code | 678 return exit_code |
756 | 679 |
757 | 680 |
758 def _RunJUnitTests(options, error_func): | 681 def _RunJUnitTests(args): |
759 """Subcommand of RunTestsCommand which runs junit tests.""" | 682 """Subcommand of RunTestsCommand which runs junit tests.""" |
760 junit_options = ProcessJUnitTestOptions(options, error_func) | 683 runner_factory, tests = junit_setup.Setup(args) |
761 runner_factory, tests = junit_setup.Setup(junit_options) | |
762 _, exit_code = junit_dispatcher.RunTests(tests, runner_factory) | 684 _, exit_code = junit_dispatcher.RunTests(tests, runner_factory) |
763 | |
764 return exit_code | 685 return exit_code |
765 | 686 |
766 | 687 |
767 def _RunMonkeyTests(options, error_func, devices): | 688 def _RunMonkeyTests(args, devices): |
768 """Subcommand of RunTestsCommands which runs monkey tests.""" | 689 """Subcommand of RunTestsCommands which runs monkey tests.""" |
769 monkey_options = ProcessMonkeyTestOptions(options, error_func) | 690 monkey_options = ProcessMonkeyTestOptions(args) |
770 | 691 |
771 runner_factory, tests = monkey_setup.Setup(monkey_options) | 692 runner_factory, tests = monkey_setup.Setup(monkey_options) |
772 | 693 |
773 results, exit_code = test_dispatcher.RunTests( | 694 results, exit_code = test_dispatcher.RunTests( |
774 tests, runner_factory, devices, shard=False, test_timeout=None, | 695 tests, runner_factory, devices, shard=False, test_timeout=None, |
775 num_retries=options.num_retries) | 696 num_retries=args.num_retries) |
776 | 697 |
777 report_results.LogFull( | 698 report_results.LogFull( |
778 results=results, | 699 results=results, |
779 test_type='Monkey', | 700 test_type='Monkey', |
780 test_package='Monkey') | 701 test_package='Monkey') |
781 | 702 |
782 if options.json_results_file: | 703 if args.json_results_file: |
783 json_results.GenerateJsonResultsFile(results, options.json_results_file) | 704 json_results.GenerateJsonResultsFile(results, args.json_results_file) |
784 | 705 |
785 return exit_code | 706 return exit_code |
786 | 707 |
787 | 708 |
788 def _RunPerfTests(options, args, error_func): | 709 def _RunPerfTests(args): |
789 """Subcommand of RunTestsCommands which runs perf tests.""" | 710 """Subcommand of RunTestsCommands which runs perf tests.""" |
790 perf_options = ProcessPerfTestOptions(options, args, error_func) | 711 perf_options = ProcessPerfTestOptions(args) |
791 | 712 |
792 # Just save a simple json with a list of test names. | 713 # Just save a simple json with a list of test names. |
793 if perf_options.output_json_list: | 714 if perf_options.output_json_list: |
794 return perf_test_runner.OutputJsonList( | 715 return perf_test_runner.OutputJsonList( |
795 perf_options.steps, perf_options.output_json_list) | 716 perf_options.steps, perf_options.output_json_list) |
796 | 717 |
797 if perf_options.output_chartjson_data: | 718 if perf_options.output_chartjson_data: |
798 return perf_test_runner.OutputChartjson( | 719 return perf_test_runner.OutputChartjson( |
799 perf_options.print_step, perf_options.output_chartjson_data) | 720 perf_options.print_step, perf_options.output_chartjson_data) |
800 | 721 |
801 # Just print the results from a single previously executed step. | 722 # Just print the results from a single previously executed step. |
802 if perf_options.print_step: | 723 if perf_options.print_step: |
803 return perf_test_runner.PrintTestOutput(perf_options.print_step) | 724 return perf_test_runner.PrintTestOutput(perf_options.print_step) |
804 | 725 |
805 runner_factory, tests, devices = perf_setup.Setup(perf_options) | 726 runner_factory, tests, devices = perf_setup.Setup(perf_options) |
806 | 727 |
807 # shard=False means that each device will get the full list of tests | 728 # shard=False means that each device will get the full list of tests |
808 # and then each one will decide its own affinity. | 729 # and then each one will decide its own affinity. |
809 # shard=True means each device will pop the next available test from a queue, | 730 # shard=True means each device will pop the next available test from a queue, |
810 # which increases throughput but has no affinity. | 731 # which increases throughput but has no affinity. |
811 results, _ = test_dispatcher.RunTests( | 732 results, _ = test_dispatcher.RunTests( |
812 tests, runner_factory, devices, shard=False, test_timeout=None, | 733 tests, runner_factory, devices, shard=False, test_timeout=None, |
813 num_retries=options.num_retries) | 734 num_retries=args.num_retries) |
814 | 735 |
815 report_results.LogFull( | 736 report_results.LogFull( |
816 results=results, | 737 results=results, |
817 test_type='Perf', | 738 test_type='Perf', |
818 test_package='Perf') | 739 test_package='Perf') |
819 | 740 |
820 if options.json_results_file: | 741 if args.json_results_file: |
821 json_results.GenerateJsonResultsFile(results, options.json_results_file) | 742 json_results.GenerateJsonResultsFile(results, args.json_results_file) |
822 | 743 |
823 if perf_options.single_step: | 744 if perf_options.single_step: |
824 return perf_test_runner.PrintTestOutput('single_step') | 745 return perf_test_runner.PrintTestOutput('single_step') |
825 | 746 |
826 perf_test_runner.PrintSummary(tests) | 747 perf_test_runner.PrintSummary(tests) |
827 | 748 |
828 # Always return 0 on the sharding stage. Individual tests' exit codes | 749 # Always return 0 on the sharding stage. Individual tests' exit codes |
829 # will be returned in the print_step stage. | 750 # will be returned in the print_step stage. |
830 return 0 | 751 return 0 |
831 | 752 |
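The shard comment above is the key scheduling decision in this file, so a toy model may help. Both functions below are illustrative only (pylib's real test_dispatcher additionally handles threads, retries, and device churn): shard=True is approximated as round-robin consumption of one shared queue, shard=False as hash-based affinity.

# Toy model of the two sharding strategies; not pylib's test_dispatcher.
import collections

def shard_true(tests, devices):
  # shard=True: devices take the next available test from one shared queue.
  # High throughput, but any test may land on any device (no affinity).
  queue = collections.deque(tests)
  assignment = dict((d, []) for d in devices)
  turn = 0
  while queue:
    assignment[devices[turn % len(devices)]].append(queue.popleft())
    turn += 1
  return assignment

def shard_false(tests, devices):
  # shard=False: every device sees the full list and keeps only the tests
  # whose affinity maps to it, giving stable placement across runs, which
  # matters when perf numbers differ between devices. A simple hash stands
  # in for the real affinity key (stable in Python 2).
  assignment = dict((d, []) for d in devices)
  for test in tests:
    assignment[devices[hash(test) % len(devices)]].append(test)
  return assignment

tests = ['benchmark_a', 'benchmark_b', 'benchmark_c', 'benchmark_d']
print(shard_true(tests, ['deviceA', 'deviceB']))
print(shard_false(tests, ['deviceA', 'deviceB']))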
832 | 753 |
833 def _RunPythonTests(options, error_func): | 754 def _RunPythonTests(args): |
834 """Subcommand of RunTestsCommand which runs python unit tests.""" | 755 """Subcommand of RunTestsCommand which runs python unit tests.""" |
835 ProcessPythonTestOptions(options, error_func) | 756 suite_vars = constants.PYTHON_UNIT_TEST_SUITES[args.suite_name] |
836 | |
837 suite_vars = constants.PYTHON_UNIT_TEST_SUITES[options.suite_name] | |
838 suite_path = suite_vars['path'] | 757 suite_path = suite_vars['path'] |
839 suite_test_modules = suite_vars['test_modules'] | 758 suite_test_modules = suite_vars['test_modules'] |
840 | 759 |
841 sys.path = [suite_path] + sys.path | 760 sys.path = [suite_path] + sys.path |
842 try: | 761 try: |
843 suite = unittest.TestSuite() | 762 suite = unittest.TestSuite() |
844 suite.addTests(unittest.defaultTestLoader.loadTestsFromName(m) | 763 suite.addTests(unittest.defaultTestLoader.loadTestsFromName(m) |
845 for m in suite_test_modules) | 764 for m in suite_test_modules) |
846 runner = unittest.TextTestRunner(verbosity=1+options.verbose_count) | 765 runner = unittest.TextTestRunner(verbosity=1+args.verbose_count) |
847 return 0 if runner.run(suite).wasSuccessful() else 1 | 766 return 0 if runner.run(suite).wasSuccessful() else 1 |
848 finally: | 767 finally: |
849 sys.path = sys.path[1:] | 768 sys.path = sys.path[1:] |
850 | 769 |
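_RunPythonTests prepends the suite path so that defaultTestLoader can import the listed modules by name, then restores sys.path in the finally block. The same load-and-run pattern, self-contained (the test case is invented for the example):

# Self-contained illustration of the unittest loading/running pattern above;
# the test case is invented.
import unittest

class _ExampleTest(unittest.TestCase):
  def testAddition(self):
    self.assertEqual(1 + 1, 2)

suite = unittest.TestSuite()
suite.addTests(unittest.defaultTestLoader.loadTestsFromTestCase(_ExampleTest))
result = unittest.TextTestRunner(verbosity=2).run(suite)
print(0 if result.wasSuccessful() else 1)  # same exit-code convention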
851 | 770 |
852 def _GetAttachedDevices(test_device=None): | 771 def _GetAttachedDevices(test_device=None): |
853 """Get all attached devices. | 772 """Get all attached devices. |
854 | 773 |
855 Args: | 774 Args: |
856 test_device: Name of a specific device to use. | 775 test_device: Name of a specific device to use. |
857 | 776 |
858 Returns: | 777 Returns: |
859 A list of attached devices. | 778 A list of attached devices. |
860 """ | 779 """ |
861 attached_devices = [] | 780 attached_devices = [] |
862 | 781 |
863 attached_devices = android_commands.GetAttachedDevices() | 782 attached_devices = android_commands.GetAttachedDevices() |
864 if test_device: | 783 if test_device: |
865 assert test_device in attached_devices, ( | 784 assert test_device in attached_devices, ( |
866 'Did not find device %s among attached devices. Attached devices: %s' | 785 'Did not find device %s among attached devices. Attached devices: %s' |
867 % (test_device, ', '.join(attached_devices))) | 786 % (test_device, ', '.join(attached_devices))) |
868 attached_devices = [test_device] | 787 attached_devices = [test_device] |
869 | 788 |
870 assert attached_devices, 'No devices attached.' | 789 assert attached_devices, 'No devices attached.' |
871 | 790 |
872 return sorted(attached_devices) | 791 return sorted(attached_devices) |
873 | 792 |
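For reference, _GetAttachedDevices' contract with a stubbed device list (the serials are illustrative):

# Stub illustrating _GetAttachedDevices' contract; serials are illustrative.
def _GetAttachedDevicesStub(test_device=None):
  attached = ['emulator-5554', '0123456789ABCDEF']  # pretend adb output
  if test_device:
    assert test_device in attached, (
        'Did not find device %s among attached devices.' % test_device)
    attached = [test_device]
  assert attached, 'No devices attached.'
  return sorted(attached)

print(_GetAttachedDevicesStub())                 # ['0123456789ABCDEF', 'emulator-5554']
print(_GetAttachedDevicesStub('emulator-5554'))  # ['emulator-5554']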
874 | 793 |
875 def RunTestsCommand(command, options, args, option_parser): | 794 def RunTestsCommand(args, parser): |
876 """Checks test type and dispatches to the appropriate function. | 795 """Checks test type and dispatches to the appropriate function. |
877 | 796 |
878 Args: | 797 Args: |
879 command: String indicating the command that was received to trigger | 798 args: argparse.Namespace object. |
880 this function. | 799 parser: argparse.ArgumentParser object. |
881 options: optparse options dictionary. | |
882 args: List of extra args from optparse. | |
883 option_parser: optparse.OptionParser object. | |
884 | 800 |
885 Returns: | 801 Returns: |
886 Integer indicating the exit code. | 802 Integer indicating the exit code. |
887 | 803 |
888 Raises: | 804 Raises: |
889 Exception: Unknown command name passed in, or an exception from an | 805 Exception: Unknown command name passed in, or an exception from an |
890 individual test runner. | 806 individual test runner. |
891 """ | 807 """ |
| 808 command = args.command |
892 | 809 |
893 # Check for extra arguments | 810 ProcessCommonOptions(args) |
894 if len(args) > 2 and command != 'perf': | |
895 option_parser.error('Unrecognized arguments: %s' % (' '.join(args[2:]))) | |
896 return constants.ERROR_EXIT_CODE | |
897 if command == 'perf': | |
898 if ((options.single_step and len(args) <= 2) or | |
899 (not options.single_step and len(args) > 2)): | |
900 option_parser.error('Unrecognized arguments: %s' % (' '.join(args))) | |
901 return constants.ERROR_EXIT_CODE | |
902 | 811 |
903 ProcessCommonOptions(options, option_parser.error) | 812 if args.enable_platform_mode: |
904 | 813 return RunTestsInPlatformMode(args, parser) |
905 if options.enable_platform_mode: | |
906 return RunTestsInPlatformMode(command, options, option_parser) | |
907 | 814 |
908 if command in constants.LOCAL_MACHINE_TESTS: | 815 if command in constants.LOCAL_MACHINE_TESTS: |
909 devices = [] | 816 devices = [] |
910 else: | 817 else: |
911 devices = _GetAttachedDevices(options.test_device) | 818 devices = _GetAttachedDevices(args.test_device) |
912 | 819 |
913 forwarder.Forwarder.RemoveHostLog() | 820 forwarder.Forwarder.RemoveHostLog() |
914 if not ports.ResetTestServerPortAllocation(): | 821 if not ports.ResetTestServerPortAllocation(): |
915 raise Exception('Failed to reset test server port.') | 822 raise Exception('Failed to reset test server port.') |
916 | 823 |
917 if command == 'gtest': | 824 if command == 'gtest': |
918 return _RunGTests(options, devices) | 825 return _RunGTests(args, devices) |
919 elif command == 'linker': | 826 elif command == 'linker': |
920 return _RunLinkerTests(options, devices) | 827 return _RunLinkerTests(args, devices) |
921 elif command == 'instrumentation': | 828 elif command == 'instrumentation': |
922 return _RunInstrumentationTests(options, option_parser.error, devices) | 829 return _RunInstrumentationTests(args, devices) |
923 elif command == 'uiautomator': | 830 elif command == 'uiautomator': |
924 return _RunUIAutomatorTests(options, option_parser.error, devices) | 831 return _RunUIAutomatorTests(args, devices) |
925 elif command == 'junit': | 832 elif command == 'junit': |
926 return _RunJUnitTests(options, option_parser.error) | 833 return _RunJUnitTests(args) |
927 elif command == 'monkey': | 834 elif command == 'monkey': |
928 return _RunMonkeyTests(options, option_parser.error, devices) | 835 return _RunMonkeyTests(args, devices) |
929 elif command == 'perf': | 836 elif command == 'perf': |
930 return _RunPerfTests(options, args, option_parser.error) | 837 return _RunPerfTests(args) |
931 elif command == 'python': | 838 elif command == 'python': |
932 return _RunPythonTests(options, option_parser.error) | 839 return _RunPythonTests(args) |
933 else: | 840 else: |
934 raise Exception('Unknown test type.') | 841 raise Exception('Unknown test type.') |
935 | 842 |
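The dispatch above hinges on args.command, which argparse populates because main() creates its subparsers with dest='command'. A minimal demonstration (the subcommand names are illustrative):

# Minimal demonstration of how dest='command' feeds the dispatch above;
# subcommand names are illustrative.
import argparse

parser = argparse.ArgumentParser()
subs = parser.add_subparsers(title='test types', dest='command')
subs.add_parser('gtest')
subs.add_parser('monkey')

args = parser.parse_args(['gtest'])
print(args.command)  # -> 'gtest'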
936 | 843 |
937 _SUPPORTED_IN_PLATFORM_MODE = [ | 844 _SUPPORTED_IN_PLATFORM_MODE = [ |
938 # TODO(jbudorick): Add support for more test types. | 845 # TODO(jbudorick): Add support for more test types. |
939 'gtest', | 846 'gtest', |
940 ] | 847 ] |
941 | 848 |
942 | 849 |
943 def RunTestsInPlatformMode(command, options, option_parser): | 850 def RunTestsInPlatformMode(args, parser): |
944 | 851 |
945 if command not in _SUPPORTED_IN_PLATFORM_MODE: | 852 if args.command not in _SUPPORTED_IN_PLATFORM_MODE: |
946 option_parser.error('%s is not yet supported in platform mode' % command) | 853 parser.error('%s is not yet supported in platform mode' % args.command) |
947 | 854 |
948 with environment_factory.CreateEnvironment( | 855 with environment_factory.CreateEnvironment(args, parser.error) as env: |
949 command, options, option_parser.error) as env: | 856 with test_instance_factory.CreateTestInstance(args, parser.error) as test: |
950 with test_instance_factory.CreateTestInstance( | |
951 command, options, option_parser.error) as test: | |
952 with test_run_factory.CreateTestRun( | 857 with test_run_factory.CreateTestRun( |
953 options, env, test, option_parser.error) as test_run: | 858 args, env, test, parser.error) as test_run: |
954 results = test_run.RunTests() | 859 results = test_run.RunTests() |
955 | 860 |
956 report_results.LogFull( | 861 report_results.LogFull( |
957 results=results, | 862 results=results, |
958 test_type=test.TestType(), | 863 test_type=test.TestType(), |
959 test_package=test_run.TestPackage(), | 864 test_package=test_run.TestPackage(), |
960 annotation=options.annotations, | 865 annotation=args.annotations, |
961 flakiness_server=options.flakiness_dashboard_server) | 866 flakiness_server=args.flakiness_dashboard_server) |
962 | 867 |
963 if options.json_results_file: | 868 if args.json_results_file: |
964 json_results.GenerateJsonResultsFile( | 869 json_results.GenerateJsonResultsFile( |
965 results, options.json_results_file) | 870 results, args.json_results_file) |
966 | 871 |
967 return results | 872 return results |
968 | 873 |
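Platform mode stacks three context managers (environment, test instance, test run) so that each stage's teardown runs even if an inner stage raises. A toy version of the same nesting with contextlib, with illustrative stage names:

# Toy version of the nested context-manager pattern in platform mode;
# stage names are illustrative.
import contextlib

@contextlib.contextmanager
def _Stage(name):
  print('setup: %s' % name)
  try:
    yield name
  finally:
    print('teardown: %s' % name)  # runs even if an inner stage raises

with _Stage('environment') as env:
  with _Stage('test instance') as test:
    with _Stage('test run') as test_run:
      print('running %s via %s in %s' % (test, test_run, env))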
969 | 874 |
970 def HelpCommand(command, _options, args, option_parser): | 875 CommandConfigTuple = collections.namedtuple( |
971 """Display help for a certain command, or overall help. | 876 'CommandConfigTuple', |
972 | 877 ['add_options_func', 'help_txt']) |
973 Args: | |
974 command: String indicating the command that was received to trigger | |
975 this function. | |
976 options: optparse options dictionary. unused. | |
977 args: List of extra args from optparse. | |
978 option_parser: optparse.OptionParser object. | |
979 | |
980 Returns: | |
981 Integer indicated exit code. | |
982 """ | |
983 # If we don't have any args, display overall help | |
984 if len(args) < 3: | |
985 option_parser.print_help() | |
986 return 0 | |
987 # If we have too many args, print an error | |
988 if len(args) > 3: | |
989 option_parser.error('Unrecognized arguments: %s' % (' '.join(args[3:]))) | |
990 return constants.ERROR_EXIT_CODE | |
991 | |
992 command = args[2] | |
993 | |
994 if command not in VALID_COMMANDS: | |
995 option_parser.error('Unrecognized command.') | |
996 | |
997 # Treat the help command as a special case. We don't care about showing a | |
998 # specific help page for itself. | |
999 if command == 'help': | |
1000 option_parser.print_help() | |
1001 return 0 | |
1002 | |
1003 VALID_COMMANDS[command].add_options_func(option_parser) | |
1004 option_parser.usage = '%prog ' + command + ' [options]' | |
1005 option_parser.commands_dict = {} | |
1006 option_parser.print_help() | |
1007 | |
1008 return 0 | |
1009 | |
1010 | |
1011 # Define a named tuple for the values in the VALID_COMMANDS dictionary so the | |
1012 # syntax is a bit prettier. The tuple is two functions: (add options, run | |
1013 # command). | |
1014 CommandFunctionTuple = collections.namedtuple( | |
1015 'CommandFunctionTuple', ['add_options_func', 'run_command_func']) | |
1016 VALID_COMMANDS = { | 878 VALID_COMMANDS = { |
1017 'gtest': CommandFunctionTuple(AddGTestOptions, RunTestsCommand), | 879 'gtest': CommandConfigTuple( |
1018 'instrumentation': CommandFunctionTuple( | 880 AddGTestOptions, |
1019 AddInstrumentationTestOptions, RunTestsCommand), | 881 'googletest-based C++ tests'), |
1020 'uiautomator': CommandFunctionTuple( | 882 'instrumentation': CommandConfigTuple( |
1021 AddUIAutomatorTestOptions, RunTestsCommand), | 883 AddInstrumentationTestOptions, |
1022 'junit': CommandFunctionTuple( | 884 'InstrumentationTestCase-based Java tests'), |
1023 AddJUnitTestOptions, RunTestsCommand), | 885 'uiautomator': CommandConfigTuple( |
1024 'monkey': CommandFunctionTuple( | 886 AddUIAutomatorTestOptions, |
1025 AddMonkeyTestOptions, RunTestsCommand), | 887 "Tests that run via Android's uiautomator command"), |
1026 'perf': CommandFunctionTuple( | 888 'junit': CommandConfigTuple( |
1027 AddPerfTestOptions, RunTestsCommand), | 889 AddJUnitTestOptions, |
1028 'python': CommandFunctionTuple( | 890 'JUnit4-based Java tests'), |
1029 AddPythonTestOptions, RunTestsCommand), | 891 'monkey': CommandConfigTuple( |
1030 'linker': CommandFunctionTuple( | 892 AddMonkeyTestOptions, |
1031 AddLinkerTestOptions, RunTestsCommand), | 893 "Tests based on Android's monkey"), |
1032 'help': CommandFunctionTuple(lambda option_parser: None, HelpCommand) | 894 'perf': CommandConfigTuple( |
1033 } | 895 AddPerfTestOptions, |
| 896 'Performance tests'), |
| 897 'python': CommandConfigTuple( |
| 898 AddPythonTestOptions, |
| 899 'Python tests based on unittest.TestCase'), |
| 900 'linker': CommandConfigTuple( |
| 901 AddLinkerTestOptions, |
| 902 'Linker tests'), |
| 903 } |
1034 | 904 |
1035 | 905 |
1036 def DumpThreadStacks(_signal, _frame): | 906 def DumpThreadStacks(_signal, _frame): |
1037 for thread in threading.enumerate(): | 907 for thread in threading.enumerate(): |
1038 reraiser_thread.LogThreadStack(thread) | 908 reraiser_thread.LogThreadStack(thread) |
1039 | 909 |
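DumpThreadStacks is bound to SIGUSR1 in main() below, so a wedged run can be inspected without killing it. From another shell or process (the pid is illustrative):

# Triggering the stack dump from another process; the pid is illustrative.
import os
import signal

os.kill(12345, signal.SIGUSR1)  # the runner logs every thread's stack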
1040 | 910 |
1041 def main(): | 911 def main(): |
1042 signal.signal(signal.SIGUSR1, DumpThreadStacks) | 912 signal.signal(signal.SIGUSR1, DumpThreadStacks) |
1043 option_parser = command_option_parser.CommandOptionParser( | 913 |
1044 commands_dict=VALID_COMMANDS) | 914 parser = argparse.ArgumentParser() |
1045 return command_option_parser.ParseAndExecute(option_parser) | 915 command_parsers = parser.add_subparsers(title='test types', |
| 916 dest='command') |
| 917 |
| 918 for test_type, config in sorted(VALID_COMMANDS.iteritems(), |
| 919 key=lambda x: x[0]): |
| 920 subparser = command_parsers.add_parser( |
| 921 test_type, usage='%(prog)s [options]', help=config.help_txt) |
| 922 config.add_options_func(subparser) |
| 923 |
| 924 args = parser.parse_args() |
| 925 exit_code = RunTestsCommand(args, parser) |
| 926 |
| 927 return exit_code |
1046 | 928 |
1047 | 929 |
1048 if __name__ == '__main__': | 930 if __name__ == '__main__': |
1049 sys.exit(main()) | 931 sys.exit(main()) |