Chromium Code Reviews

Unified Diff: build/android/test_runner.py

Issue 761903003: Update from https://crrev.com/306655 (Closed) Base URL: git@github.com:domokit/mojo.git@master
Patch Set: Created 6 years ago
 #!/usr/bin/env python
 #
 # Copyright 2013 The Chromium Authors. All rights reserved.
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.

 """Runs all types of tests from one unified interface."""

+import argparse
 import collections
 import logging
-import optparse
 import os
 import shutil
 import signal
 import sys
 import threading
 import unittest

 from pylib import android_commands
 from pylib import constants
 from pylib import forwarder
(...skipping 10 matching lines...)
 from pylib.host_driven import setup as host_driven_setup
 from pylib.instrumentation import setup as instrumentation_setup
 from pylib.instrumentation import test_options as instrumentation_test_options
 from pylib.junit import setup as junit_setup
 from pylib.junit import test_dispatcher as junit_dispatcher
 from pylib.monkey import setup as monkey_setup
 from pylib.monkey import test_options as monkey_test_options
 from pylib.perf import setup as perf_setup
 from pylib.perf import test_options as perf_test_options
 from pylib.perf import test_runner as perf_test_runner
+from pylib.results import json_results
+from pylib.results import report_results
 from pylib.uiautomator import setup as uiautomator_setup
 from pylib.uiautomator import test_options as uiautomator_test_options
 from pylib.utils import apk_helper
-from pylib.utils import command_option_parser
-from pylib.utils import report_results
 from pylib.utils import reraiser_thread
 from pylib.utils import run_tests_helper


-def AddCommonOptions(option_parser):
-  """Adds all common options to |option_parser|."""
+def AddCommonOptions(parser):
+  """Adds all common options to |parser|."""

-  group = optparse.OptionGroup(option_parser, 'Common Options')
+  group = parser.add_argument_group('Common Options')
+
   default_build_type = os.environ.get('BUILDTYPE', 'Debug')
-  group.add_option('--debug', action='store_const', const='Debug',
-                   dest='build_type', default=default_build_type,
-                   help=('If set, run test suites under out/Debug. '
-                         'Default is env var BUILDTYPE or Debug.'))
-  group.add_option('--release', action='store_const',
-                   const='Release', dest='build_type',
-                   help=('If set, run test suites under out/Release.'
-                         ' Default is env var BUILDTYPE or Debug.'))
-  group.add_option('--build-directory', dest='build_directory',
-                   help=('Path to the directory in which build files are'
-                         ' located (should not include build type)'))
-  group.add_option('--output-directory', dest='output_directory',
-                   help=('Path to the directory in which build files are'
-                         ' located (must include build type). This will take'
-                         ' precedence over --debug, --release and'
-                         ' --build-directory'))
-  group.add_option('--num_retries', dest='num_retries', type='int',
-                   default=2,
-                   help=('Number of retries for a test before '
-                         'giving up.'))
-  group.add_option('-v',
-                   '--verbose',
-                   dest='verbose_count',
-                   default=0,
-                   action='count',
-                   help='Verbose level (multiple times for more)')
-  group.add_option('--flakiness-dashboard-server',
-                   dest='flakiness_dashboard_server',
-                   help=('Address of the server that is hosting the '
-                         'Chrome for Android flakiness dashboard.'))
-  group.add_option('--enable-platform-mode', action='store_true',
-                   help=('Run the test scripts in platform mode, which '
-                         'conceptually separates the test runner from the '
-                         '"device" (local or remote, real or emulated) on '
-                         'which the tests are running. [experimental]'))
-  group.add_option('-e', '--environment', default='local',
-                   help=('Test environment to run in. Must be one of: %s' %
-                         ', '.join(constants.VALID_ENVIRONMENTS)))
-  group.add_option('--adb-path',
-                   help=('Specify the absolute path of the adb binary that '
-                         'should be used.'))
-  option_parser.add_option_group(group)
+
+  debug_or_release_group = group.add_mutually_exclusive_group()
+  debug_or_release_group.add_argument(
+      '--debug', action='store_const', const='Debug', dest='build_type',
+      default=default_build_type,
+      help=('If set, run test suites under out/Debug. '
+            'Default is env var BUILDTYPE or Debug.'))
+  debug_or_release_group.add_argument(
+      '--release', action='store_const', const='Release', dest='build_type',
+      help=('If set, run test suites under out/Release. '
+            'Default is env var BUILDTYPE or Debug.'))
+
+  group.add_argument('--build-directory', dest='build_directory',
+                     help=('Path to the directory in which build files are'
+                           ' located (should not include build type)'))
+  group.add_argument('--output-directory', dest='output_directory',
+                     help=('Path to the directory in which build files are'
+                           ' located (must include build type). This will take'
+                           ' precedence over --debug, --release and'
+                           ' --build-directory'))
+  group.add_argument('--num_retries', dest='num_retries', type=int, default=2,
+                     help=('Number of retries for a test before '
+                           'giving up (default: %(default)s).'))
+  group.add_argument('-v',
+                     '--verbose',
+                     dest='verbose_count',
+                     default=0,
+                     action='count',
+                     help='Verbose level (multiple times for more)')
+  group.add_argument('--flakiness-dashboard-server',
+                     dest='flakiness_dashboard_server',
+                     help=('Address of the server that is hosting the '
+                           'Chrome for Android flakiness dashboard.'))
+  group.add_argument('--enable-platform-mode', action='store_true',
+                     help=('Run the test scripts in platform mode, which '
+                           'conceptually separates the test runner from the '
+                           '"device" (local or remote, real or emulated) on '
+                           'which the tests are running. [experimental]'))
+  group.add_argument('-e', '--environment', default='local',
+                     choices=constants.VALID_ENVIRONMENTS,
+                     help='Test environment to run in (default: %(default)s).')
+  group.add_argument('--adb-path',
+                     help=('Specify the absolute path of the adb binary that '
+                           'should be used.'))
+  group.add_argument('--json-results-file', dest='json_results_file',
+                     help='If set, will dump results in JSON form '
+                          'to specified file.')


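The core pattern in this hunk: optparse's add_option/OptionGroup becomes add_argument/add_argument_group, and the --debug/--release pair, whose exclusivity optparse could not enforce, moves into a mutually exclusive group. A minimal standalone sketch of that pattern (plain argparse, no pylib dependencies; the default value here is illustrative):

import argparse

parser = argparse.ArgumentParser()
group = parser.add_argument_group('Common Options')
# Argument groups support nested mutually exclusive groups, which is
# what lets both flags share dest='build_type' safely.
debug_or_release = group.add_mutually_exclusive_group()
debug_or_release.add_argument('--debug', action='store_const', const='Debug',
                              dest='build_type', default='Debug')
debug_or_release.add_argument('--release', action='store_const',
                              const='Release', dest='build_type')

print(parser.parse_args(['--release']).build_type)  # Release
# parser.parse_args(['--debug', '--release']) exits with a
# "not allowed with" usage error instead of silently letting the last flag win.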
-def ProcessCommonOptions(options, error_func):
+def ProcessCommonOptions(args):
   """Processes and handles all common options."""
-  run_tests_helper.SetLogLevel(options.verbose_count)
-  constants.SetBuildType(options.build_type)
-  if options.build_directory:
-    constants.SetBuildDirectory(options.build_directory)
-  if options.output_directory:
-    constants.SetOutputDirectort(options.output_directory)
-  if options.adb_path:
-    constants.SetAdbPath(options.adb_path)
+  run_tests_helper.SetLogLevel(args.verbose_count)
+  constants.SetBuildType(args.build_type)
+  if args.build_directory:
+    constants.SetBuildDirectory(args.build_directory)
+  if args.output_directory:
+    constants.SetOutputDirectort(args.output_directory)
+  if args.adb_path:
+    constants.SetAdbPath(args.adb_path)
   # Some things such as Forwarder require ADB to be in the environment path.
   adb_dir = os.path.dirname(constants.GetAdbPath())
   if adb_dir and adb_dir not in os.environ['PATH'].split(os.pathsep):
     os.environ['PATH'] = adb_dir + os.pathsep + os.environ['PATH']
-  if options.environment not in constants.VALID_ENVIRONMENTS:
-    error_func('--environment must be one of: %s' %
-               ', '.join(constants.VALID_ENVIRONMENTS))


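Note how the deleted --environment validation is not lost: it is now enforced at parse time by choices= in AddCommonOptions. A sketch of the mechanism (the environment names below are hypothetical stand-ins for constants.VALID_ENVIRONMENTS):

import argparse

VALID_ENVIRONMENTS = ['local', 'remote_device']  # hypothetical stand-ins

parser = argparse.ArgumentParser()
parser.add_argument('-e', '--environment', default='local',
                    choices=VALID_ENVIRONMENTS)

print(parser.parse_args([]).environment)  # local
# parser.parse_args(['-e', 'bogus']) exits with an "invalid choice" error,
# replacing the hand-rolled error_func() call removed above.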
-def AddDeviceOptions(option_parser):
-  group = optparse.OptionGroup(option_parser, 'Device Options')
-  group.add_option('-c', dest='cleanup_test_files',
-                   help='Cleanup test files on the device after run',
-                   action='store_true')
-  group.add_option('--tool',
-                   dest='tool',
-                   help=('Run the test under a tool '
-                         '(use --tool help to list them)'))
-  group.add_option('-d', '--device', dest='test_device',
-                   help=('Target device for the test suite '
-                         'to run on.'))
-  option_parser.add_option_group(group)
+def AddDeviceOptions(parser):
+  """Adds device options to |parser|."""
+  group = parser.add_argument_group(title='Device Options')
+  group.add_argument('-c', dest='cleanup_test_files',
+                     help='Cleanup test files on the device after run',
+                     action='store_true')
+  group.add_argument('--tool',
+                     dest='tool',
+                     help=('Run the test under a tool '
+                           '(use --tool help to list them)'))
+  group.add_argument('-d', '--device', dest='test_device',
+                     help=('Target device for the test suite '
+                           'to run on.'))


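AddCommonOptions and AddDeviceOptions are now called once per test-type parser; presumably the main() (outside this diff) creates one subparser per command and passes it in. A sketch of that wiring, under that assumption:

import argparse

def AddDeviceOptions(parser):
  group = parser.add_argument_group(title='Device Options')
  group.add_argument('-d', '--device', dest='test_device',
                     help='Target device for the test suite to run on.')

parser = argparse.ArgumentParser()
subparsers = parser.add_subparsers(dest='command')
gtest_parser = subparsers.add_parser('gtest')
AddDeviceOptions(gtest_parser)  # each command re-registers the shared group

args = parser.parse_args(['gtest', '-d', '0123456789abcdef'])
print(args.test_device)  # 0123456789abcdef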
-def AddGTestOptions(option_parser):
-  """Adds gtest options to |option_parser|."""
-
-  option_parser.usage = '%prog gtest [options]'
-  option_parser.commands_dict = {}
-  option_parser.example = '%prog gtest -s base_unittests'
-
-  # TODO(gkanwar): Make this option required
-  option_parser.add_option('-s', '--suite', dest='suite_name',
-                           help=('Executable name of the test suite to run '
-                                 '(use -s help to list them).'))
-  option_parser.add_option('-f', '--gtest_filter', '--gtest-filter',
-                           dest='test_filter',
-                           help='googletest-style filter string.')
-  option_parser.add_option('--gtest_also_run_disabled_tests',
-                           '--gtest-also-run-disabled-tests',
-                           dest='run_disabled', action='store_true',
-                           help='Also run disabled tests if applicable.')
-  option_parser.add_option('-a', '--test-arguments', dest='test_arguments',
-                           default='',
-                           help='Additional arguments to pass to the test.')
-  option_parser.add_option('-t', dest='timeout',
-                           help='Timeout to wait for each test',
-                           type='int',
-                           default=60)
-  option_parser.add_option('--isolate_file_path',
-                           '--isolate-file-path',
-                           dest='isolate_file_path',
-                           help='.isolate file path to override the default '
-                                'path')
-  # TODO(gkanwar): Move these to Common Options once we have the plumbing
-  # in our other test types to handle these commands
-  AddCommonOptions(option_parser)
-  AddDeviceOptions(option_parser)
+def AddGTestOptions(parser):
+  """Adds gtest options to |parser|."""
+
+  gtest_suites = list(gtest_config.STABLE_TEST_SUITES
+                      + gtest_config.EXPERIMENTAL_TEST_SUITES)
+
+  group = parser.add_argument_group('GTest Options')
+  group.add_argument('-s', '--suite', dest='suite_name',
+                     nargs='+', metavar='SUITE_NAME', required=True,
+                     help=('Executable name of the test suite to run. '
+                           'Available suites include (but are not limited to): '
+                           '%s' % ', '.join('"%s"' % s for s in gtest_suites)))
+  group.add_argument('-f', '--gtest_filter', '--gtest-filter',
+                     dest='test_filter',
+                     help='googletest-style filter string.')
+  group.add_argument('--gtest_also_run_disabled_tests',
+                     '--gtest-also-run-disabled-tests',
+                     dest='run_disabled', action='store_true',
+                     help='Also run disabled tests if applicable.')
+  group.add_argument('-a', '--test-arguments', dest='test_arguments',
+                     default='',
+                     help='Additional arguments to pass to the test.')
+  group.add_argument('-t', dest='timeout', type=int, default=60,
+                     help='Timeout to wait for each test '
+                          '(default: %(default)s).')
+  group.add_argument('--isolate_file_path',
+                     '--isolate-file-path',
+                     dest='isolate_file_path',
+                     help='.isolate file path to override the default '
+                          'path')
+  AddDeviceOptions(parser)
+  AddCommonOptions(parser)


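With nargs='+' and required=True, argparse both rejects a missing -s (replacing the old '-s help' convention) and always produces a list of suite names, which is why ProcessGTestOptions disappears below. Sketch:

import argparse

parser = argparse.ArgumentParser()
parser.add_argument('-s', '--suite', dest='suite_name', nargs='+',
                    metavar='SUITE_NAME', required=True)

args = parser.parse_args(['-s', 'base_unittests', 'net_unittests'])
print(args.suite_name)  # ['base_unittests', 'net_unittests']
# parser.parse_args([]) exits with an error because -s/--suite is required.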
-def AddLinkerTestOptions(option_parser):
-  option_parser.usage = '%prog linker'
-  option_parser.commands_dict = {}
-  option_parser.example = '%prog linker'
-
-  option_parser.add_option('-f', '--gtest-filter', dest='test_filter',
-                           help='googletest-style filter string.')
-  AddCommonOptions(option_parser)
-  AddDeviceOptions(option_parser)
+def AddLinkerTestOptions(parser):
+  group = parser.add_argument_group('Linker Test Options')
+  group.add_argument('-f', '--gtest-filter', dest='test_filter',
+                     help='googletest-style filter string.')
+  AddCommonOptions(parser)
+  AddDeviceOptions(parser)


-def ProcessGTestOptions(options):
-  """Intercept test suite help to list test suites.
-
-  Args:
-    options: Command line options.
-  """
-  if options.suite_name == 'help':
-    print 'Available test suites are:'
-    for test_suite in (gtest_config.STABLE_TEST_SUITES +
-                       gtest_config.EXPERIMENTAL_TEST_SUITES):
-      print test_suite
-    sys.exit(0)
-
-  # Convert to a list, assuming all test suites if nothing was specified.
-  # TODO(gkanwar): Require having a test suite
-  if options.suite_name:
-    options.suite_name = [options.suite_name]
-  else:
-    options.suite_name = [s for s in gtest_config.STABLE_TEST_SUITES]
-
-
-def AddJavaTestOptions(option_parser):
+def AddJavaTestOptions(argument_group):
   """Adds the Java test options to |option_parser|."""

-  option_parser.add_option('-f', '--test-filter', dest='test_filter',
-                           help=('Test filter (if not fully qualified, '
-                                 'will run all matches).'))
-  option_parser.add_option(
+  argument_group.add_argument(
+      '-f', '--test-filter', dest='test_filter',
+      help=('Test filter (if not fully qualified, will run all matches).'))
+  argument_group.add_argument(
       '-A', '--annotation', dest='annotation_str',
       help=('Comma-separated list of annotations. Run only tests with any of '
             'the given annotations. An annotation can be either a key or a '
             'key-values pair. A test that has no annotation is considered '
             '"SmallTest".'))
-  option_parser.add_option(
+  argument_group.add_argument(
       '-E', '--exclude-annotation', dest='exclude_annotation_str',
       help=('Comma-separated list of annotations. Exclude tests with these '
             'annotations.'))
-  option_parser.add_option(
+  argument_group.add_argument(
       '--screenshot', dest='screenshot_failures', action='store_true',
       help='Capture screenshots of test failures')
-  option_parser.add_option(
+  argument_group.add_argument(
       '--save-perf-json', action='store_true',
       help='Saves the JSON file for each UI Perf test.')
-  option_parser.add_option(
+  argument_group.add_argument(
       '--official-build', action='store_true', help='Run official build tests.')
-  option_parser.add_option(
+  argument_group.add_argument(
       '--test_data', '--test-data', action='append', default=[],
       help=('Each instance defines a directory of test data that should be '
             'copied to the target(s) before running the tests. The argument '
             'should be of the form <target>:<source>, <target> is relative to '
             'the device data directory, and <source> is relative to the '
             'chromium build directory.'))


-def ProcessJavaTestOptions(options):
+def ProcessJavaTestOptions(args):
   """Processes options/arguments and populates |options| with defaults."""

-  if options.annotation_str:
-    options.annotations = options.annotation_str.split(',')
-  elif options.test_filter:
-    options.annotations = []
+  # TODO(jbudorick): Handle most of this function in argparse.
+  if args.annotation_str:
+    args.annotations = args.annotation_str.split(',')
+  elif args.test_filter:
+    args.annotations = []
   else:
-    options.annotations = ['Smoke', 'SmallTest', 'MediumTest', 'LargeTest',
-                           'EnormousTest', 'IntegrationTest']
+    args.annotations = ['Smoke', 'SmallTest', 'MediumTest', 'LargeTest',
+                        'EnormousTest', 'IntegrationTest']

-  if options.exclude_annotation_str:
-    options.exclude_annotations = options.exclude_annotation_str.split(',')
+  if args.exclude_annotation_str:
+    args.exclude_annotations = args.exclude_annotation_str.split(',')
   else:
-    options.exclude_annotations = []
+    args.exclude_annotations = []


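One way the TODO above could push this splitting into argparse is a type= callable that converts the raw string as it is parsed (a sketch of a possible follow-up, not what this CL does):

import argparse

def comma_separated_list(value):
  return value.split(',')

parser = argparse.ArgumentParser()
parser.add_argument('-A', '--annotation', dest='annotations',
                    type=comma_separated_list)

print(parser.parse_args(['-A', 'Smoke,SmallTest']).annotations)
# ['Smoke', 'SmallTest']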
-def AddInstrumentationTestOptions(option_parser):
-  """Adds Instrumentation test options to |option_parser|."""
-
-  option_parser.usage = '%prog instrumentation [options]'
-  option_parser.commands_dict = {}
-  option_parser.example = ('%prog instrumentation '
-                           '--test-apk=ChromeShellTest')
-
-  AddJavaTestOptions(option_parser)
-  AddCommonOptions(option_parser)
-  AddDeviceOptions(option_parser)
-
-  option_parser.add_option('-j', '--java-only', action='store_true',
-                           default=False, help='Run only the Java tests.')
-  option_parser.add_option('-p', '--python-only', action='store_true',
-                           default=False,
-                           help='Run only the host-driven tests.')
-  option_parser.add_option('--host-driven-root',
-                           help='Root of the host-driven tests.')
-  option_parser.add_option('-w', '--wait_debugger', dest='wait_for_debugger',
-                           action='store_true',
-                           help='Wait for debugger.')
-  option_parser.add_option(
-      '--test-apk', dest='test_apk',
-      help=('The name of the apk containing the tests '
-            '(without the .apk extension; e.g. "ContentShellTest").'))
-  option_parser.add_option('--coverage-dir',
-                           help=('Directory in which to place all generated '
-                                 'EMMA coverage files.'))
-  option_parser.add_option('--device-flags', dest='device_flags', default='',
-                           help='The relative filepath to a file containing '
-                                'command-line flags to set on the device')
-  option_parser.add_option('--isolate_file_path',
-                           '--isolate-file-path',
-                           dest='isolate_file_path',
-                           help='.isolate file path to override the default '
-                                'path')
+def AddInstrumentationTestOptions(parser):
+  """Adds Instrumentation test options to |parser|."""
+
+  parser.usage = '%(prog)s [options]'
+
+  group = parser.add_argument_group('Instrumentation Test Options')
+  AddJavaTestOptions(group)
+
+  java_or_python_group = group.add_mutually_exclusive_group()
+  java_or_python_group.add_argument(
+      '-j', '--java-only', action='store_false',
+      dest='run_python_tests', default=True, help='Run only the Java tests.')
+  java_or_python_group.add_argument(
+      '-p', '--python-only', action='store_false',
+      dest='run_java_tests', default=True,
+      help='Run only the host-driven tests.')
+
+  group.add_argument('--host-driven-root',
+                     help='Root of the host-driven tests.')
+  group.add_argument('-w', '--wait_debugger', dest='wait_for_debugger',
+                     action='store_true',
+                     help='Wait for debugger.')
+  group.add_argument('--test-apk', dest='test_apk', required=True,
+                     help=('The name of the apk containing the tests '
+                           '(without the .apk extension; '
+                           'e.g. "ContentShellTest").'))
+  group.add_argument('--coverage-dir',
+                     help=('Directory in which to place all generated '
+                           'EMMA coverage files.'))
+  group.add_argument('--device-flags', dest='device_flags', default='',
+                     help='The relative filepath to a file containing '
+                          'command-line flags to set on the device')
+  group.add_argument('--isolate_file_path',
+                     '--isolate-file-path',
+                     dest='isolate_file_path',
+                     help='.isolate file path to override the default '
+                          'path')
+
+  AddCommonOptions(parser)
+  AddDeviceOptions(parser)


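The -j/-p rewrite is the subtle part of this hunk: instead of two store_true booleans plus the manual exclusivity check deleted from ProcessInstrumentationOptions below, each flag now switches the other test type off via action='store_false' and a crossed dest, and argparse enforces the exclusivity. Standalone sketch:

import argparse

parser = argparse.ArgumentParser()
group = parser.add_mutually_exclusive_group()
group.add_argument('-j', '--java-only', action='store_false',
                   dest='run_python_tests', default=True)
group.add_argument('-p', '--python-only', action='store_false',
                   dest='run_java_tests', default=True)

args = parser.parse_args(['-j'])
print(args.run_java_tests)    # True
print(args.run_python_tests)  # False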
-def ProcessInstrumentationOptions(options, error_func):
+def ProcessInstrumentationOptions(args):
   """Processes options/arguments and populate |options| with defaults.

   Args:
-    options: optparse.Options object.
-    error_func: Function to call with the error message in case of an error.
+    args: argparse.Namespace object.

   Returns:
     An InstrumentationOptions named tuple which contains all options relevant to
     instrumentation tests.
   """

-  ProcessJavaTestOptions(options)
+  ProcessJavaTestOptions(args)

-  if options.java_only and options.python_only:
-    error_func('Options java_only (-j) and python_only (-p) '
-               'are mutually exclusive.')
-  options.run_java_tests = True
-  options.run_python_tests = True
-  if options.java_only:
-    options.run_python_tests = False
-  elif options.python_only:
-    options.run_java_tests = False
-
-  if not options.host_driven_root:
-    options.run_python_tests = False
-
-  if not options.test_apk:
-    error_func('--test-apk must be specified.')
-
-
-  options.test_apk_path = os.path.join(
+  if not args.host_driven_root:
+    args.run_python_tests = False
+
+  args.test_apk_path = os.path.join(
       constants.GetOutDirectory(),
       constants.SDK_BUILD_APKS_DIR,
-      '%s.apk' % options.test_apk)
-  options.test_apk_jar_path = os.path.join(
+      '%s.apk' % args.test_apk)
+  args.test_apk_jar_path = os.path.join(
       constants.GetOutDirectory(),
       constants.SDK_BUILD_TEST_JAVALIB_DIR,
-      '%s.jar' % options.test_apk)
-  options.test_support_apk_path = '%sSupport%s' % (
-      os.path.splitext(options.test_apk_path))
+      '%s.jar' % args.test_apk)
+  args.test_support_apk_path = '%sSupport%s' % (
+      os.path.splitext(args.test_apk_path))

-  options.test_runner = apk_helper.GetInstrumentationName(options.test_apk_path)
+  args.test_runner = apk_helper.GetInstrumentationName(args.test_apk_path)

+  # TODO(jbudorick): Get rid of InstrumentationOptions.
   return instrumentation_test_options.InstrumentationOptions(
-      options.tool,
-      options.cleanup_test_files,
-      options.annotations,
-      options.exclude_annotations,
-      options.test_filter,
-      options.test_data,
-      options.save_perf_json,
-      options.screenshot_failures,
-      options.wait_for_debugger,
-      options.coverage_dir,
-      options.test_apk,
-      options.test_apk_path,
-      options.test_apk_jar_path,
-      options.test_runner,
-      options.test_support_apk_path,
-      options.device_flags,
-      options.isolate_file_path
+      args.tool,
+      args.cleanup_test_files,
+      args.annotations,
+      args.exclude_annotations,
+      args.test_filter,
+      args.test_data,
+      args.save_perf_json,
+      args.screenshot_failures,
+      args.wait_for_debugger,
+      args.coverage_dir,
+      args.test_apk,
+      args.test_apk_path,
+      args.test_apk_jar_path,
+      args.test_runner,
+      args.test_support_apk_path,
+      args.device_flags,
+      args.isolate_file_path
       )


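The '%sSupport%s' % os.path.splitext(...) idiom above splices 'Support' between the path root and the extension, because splitext returns a (root, ext) tuple that feeds both %s slots. Worked example (the path is hypothetical):

import os.path

test_apk_path = 'out/Debug/apks/ContentShellTest.apk'  # hypothetical path
print('%sSupport%s' % os.path.splitext(test_apk_path))
# out/Debug/apks/ContentShellTestSupport.apk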
-def AddUIAutomatorTestOptions(option_parser):
-  """Adds UI Automator test options to |option_parser|."""
-
-  option_parser.usage = '%prog uiautomator [options]'
-  option_parser.commands_dict = {}
-  option_parser.example = (
-      '%prog uiautomator --test-jar=chrome_shell_uiautomator_tests'
-      ' --package=chrome_shell')
-  option_parser.add_option(
-      '--package',
-      help=('Package under test. Possible values: %s' %
-            constants.PACKAGE_INFO.keys()))
-  option_parser.add_option(
-      '--test-jar', dest='test_jar',
+def AddUIAutomatorTestOptions(parser):
+  """Adds UI Automator test options to |parser|."""
+
+  group = parser.add_argument_group('UIAutomator Test Options')
+  AddJavaTestOptions(group)
+  group.add_argument(
+      '--package', required=True, choices=constants.PACKAGE_INFO.keys(),
+      metavar='PACKAGE', help='Package under test.')
+  group.add_argument(
+      '--test-jar', dest='test_jar', required=True,
       help=('The name of the dexed jar containing the tests (without the '
             '.dex.jar extension). Alternatively, this can be a full path '
             'to the jar.'))

-  AddJavaTestOptions(option_parser)
-  AddCommonOptions(option_parser)
-  AddDeviceOptions(option_parser)
+  AddCommonOptions(parser)
+  AddDeviceOptions(parser)


-def ProcessUIAutomatorOptions(options, error_func):
+def ProcessUIAutomatorOptions(args):
   """Processes UIAutomator options/arguments.

   Args:
-    options: optparse.Options object.
-    error_func: Function to call with the error message in case of an error.
+    args: argparse.Namespace object.

   Returns:
     A UIAutomatorOptions named tuple which contains all options relevant to
     uiautomator tests.
   """

-  ProcessJavaTestOptions(options)
+  ProcessJavaTestOptions(args)

-  if not options.package:
-    error_func('--package is required.')
-
-  if options.package not in constants.PACKAGE_INFO:
-    error_func('Invalid package.')
-
-  if not options.test_jar:
-    error_func('--test-jar must be specified.')
-
-  if os.path.exists(options.test_jar):
+  if os.path.exists(args.test_jar):
     # The dexed JAR is fully qualified, assume the info JAR lives along side.
-    options.uiautomator_jar = options.test_jar
+    args.uiautomator_jar = args.test_jar
   else:
-    options.uiautomator_jar = os.path.join(
+    args.uiautomator_jar = os.path.join(
         constants.GetOutDirectory(),
         constants.SDK_BUILD_JAVALIB_DIR,
-        '%s.dex.jar' % options.test_jar)
-  options.uiautomator_info_jar = (
-      options.uiautomator_jar[:options.uiautomator_jar.find('.dex.jar')] +
+        '%s.dex.jar' % args.test_jar)
+  args.uiautomator_info_jar = (
+      args.uiautomator_jar[:args.uiautomator_jar.find('.dex.jar')] +
       '_java.jar')

   return uiautomator_test_options.UIAutomatorOptions(
-      options.tool,
-      options.cleanup_test_files,
-      options.annotations,
-      options.exclude_annotations,
-      options.test_filter,
-      options.test_data,
-      options.save_perf_json,
-      options.screenshot_failures,
-      options.uiautomator_jar,
-      options.uiautomator_info_jar,
-      options.package)
+      args.tool,
+      args.cleanup_test_files,
+      args.annotations,
+      args.exclude_annotations,
+      args.test_filter,
+      args.test_data,
+      args.save_perf_json,
+      args.screenshot_failures,
+      args.uiautomator_jar,
+      args.uiautomator_info_jar,
+      args.package)


-def AddJUnitTestOptions(option_parser):
-  """Adds junit test options to |option_parser|."""
-  option_parser.usage = '%prog junit -s [test suite name]'
-  option_parser.commands_dict = {}
-
-  option_parser.add_option(
-      '-s', '--test-suite', dest='test_suite',
+def AddJUnitTestOptions(parser):
+  """Adds junit test options to |parser|."""
+
+  group = parser.add_argument_group('JUnit Test Options')
+  group.add_argument(
+      '-s', '--test-suite', dest='test_suite', required=True,
       help=('JUnit test suite to run.'))
-  option_parser.add_option(
+  group.add_argument(
       '-f', '--test-filter', dest='test_filter',
       help='Filters tests googletest-style.')
-  option_parser.add_option(
+  group.add_argument(
       '--package-filter', dest='package_filter',
       help='Filters tests by package.')
-  option_parser.add_option(
+  group.add_argument(
       '--runner-filter', dest='runner_filter',
       help='Filters tests by runner class. Must be fully qualified.')
-  option_parser.add_option(
-      '--sdk-version', dest='sdk_version', type="int",
+  group.add_argument(
+      '--sdk-version', dest='sdk_version', type=int,
       help='The Android SDK version.')
-  AddCommonOptions(option_parser)
+  AddCommonOptions(parser)


-def ProcessJUnitTestOptions(options, error_func):
-  """Processes all JUnit test options."""
-  if not options.test_suite:
-    error_func('No test suite specified.')
-  return options
-
-
-def AddMonkeyTestOptions(option_parser):
-  """Adds monkey test options to |option_parser|."""
-
-  option_parser.usage = '%prog monkey [options]'
-  option_parser.commands_dict = {}
-  option_parser.example = (
-      '%prog monkey --package=chrome_shell')
-
-  option_parser.add_option(
-      '--package',
-      help=('Package under test. Possible values: %s' %
-            constants.PACKAGE_INFO.keys()))
-  option_parser.add_option(
-      '--event-count', default=10000, type='int',
-      help='Number of events to generate [default: %default].')
-  option_parser.add_option(
-      '--category', default='',
-      help='A list of allowed categories.')
-  option_parser.add_option(
-      '--throttle', default=100, type='int',
-      help='Delay between events (ms) [default: %default]. ')
-  option_parser.add_option(
-      '--seed', type='int',
-      help=('Seed value for pseudo-random generator. Same seed value generates '
-            'the same sequence of events. Seed is randomized by default.'))
-  option_parser.add_option(
-      '--extra-args', default='',
-      help=('String of other args to pass to the command verbatim '
-            '[default: "%default"].'))
-
-  AddCommonOptions(option_parser)
-  AddDeviceOptions(option_parser)
+def AddMonkeyTestOptions(parser):
+  """Adds monkey test options to |parser|."""
+
+  group = parser.add_argument_group('Monkey Test Options')
+  group.add_argument(
+      '--package', required=True, choices=constants.PACKAGE_INFO.keys(),
+      metavar='PACKAGE', help='Package under test.')
+  group.add_argument(
+      '--event-count', default=10000, type=int,
+      help='Number of events to generate (default: %(default)s).')
+  group.add_argument(
+      '--category', default='',
+      help='A list of allowed categories.')
+  group.add_argument(
+      '--throttle', default=100, type=int,
+      help='Delay between events (ms) (default: %(default)s). ')
+  group.add_argument(
+      '--seed', type=int,
+      help=('Seed value for pseudo-random generator. Same seed value generates '
+            'the same sequence of events. Seed is randomized by default.'))
+  group.add_argument(
+      '--extra-args', default='',
+      help=('String of other args to pass to the command verbatim.'))
+
+  AddCommonOptions(parser)
+  AddDeviceOptions(parser)


-def ProcessMonkeyTestOptions(options, error_func):
+def ProcessMonkeyTestOptions(args):
   """Processes all monkey test options.

   Args:
-    options: optparse.Options object.
-    error_func: Function to call with the error message in case of an error.
+    args: argparse.Namespace object.

   Returns:
     A MonkeyOptions named tuple which contains all options relevant to
     monkey tests.
   """
-  if not options.package:
-    error_func('--package is required.')
-
-  if options.package not in constants.PACKAGE_INFO:
-    error_func('Invalid package.')
-
-  category = options.category
+  # TODO(jbudorick): Handle this directly in argparse with nargs='+'
+  category = args.category
   if category:
-    category = options.category.split(',')
+    category = args.category.split(',')

+  # TODO(jbudorick): Get rid of MonkeyOptions.
   return monkey_test_options.MonkeyOptions(
-      options.verbose_count,
-      options.package,
-      options.event_count,
+      args.verbose_count,
+      args.package,
+      args.event_count,
       category,
-      options.throttle,
-      options.seed,
-      options.extra_args)
+      args.throttle,
+      args.seed,
+      args.extra_args)


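Per the TODO above, --category could let argparse collect multiple values itself rather than splitting a comma-separated string after the fact. Sketch of that alternative (the category names are made up):

import argparse

parser = argparse.ArgumentParser()
parser.add_argument('--category', nargs='+', default=[],
                    help='A list of allowed categories.')

print(parser.parse_args(['--category', 'chrome', 'webview']).category)
# ['chrome', 'webview']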
-def AddPerfTestOptions(option_parser):
-  """Adds perf test options to |option_parser|."""
-
-  option_parser.usage = '%prog perf [options]'
-  option_parser.commands_dict = {}
-  option_parser.example = ('%prog perf '
-                           '[--single-step -- command args] or '
-                           '[--steps perf_steps.json] or '
-                           '[--print-step step]')
-
-  option_parser.add_option(
-      '--single-step',
-      action='store_true',
-      help='Execute the given command with retries, but only print the result '
-           'for the "most successful" round.')
-  option_parser.add_option(
-      '--steps',
-      help='JSON file containing the list of commands to run.')
-  option_parser.add_option(
-      '--flaky-steps',
-      help=('A JSON file containing steps that are flaky '
-            'and will have its exit code ignored.'))
-  option_parser.add_option(
-      '--output-json-list',
-      help='Write a simple list of names from --steps into the given file.')
-  option_parser.add_option(
-      '--print-step',
-      help='The name of a previously executed perf step to print.')
-  option_parser.add_option(
-      '--no-timeout', action='store_true',
-      help=('Do not impose a timeout. Each perf step is responsible for '
-            'implementing the timeout logic.'))
-  option_parser.add_option(
-      '-f', '--test-filter',
-      help=('Test filter (will match against the names listed in --steps).'))
-  option_parser.add_option(
-      '--dry-run',
-      action='store_true',
-      help='Just print the steps without executing.')
-  AddCommonOptions(option_parser)
-  AddDeviceOptions(option_parser)
+def AddPerfTestOptions(parser):
+  """Adds perf test options to |parser|."""
+
+  group = parser.add_argument_group('Perf Test Options')
+
+  class SingleStepAction(argparse.Action):
+    def __call__(self, parser, namespace, values, option_string=None):
+      if values and not namespace.single_step:
+        parser.error('single step command provided, '
+                     'but --single-step not specified.')
+      elif namespace.single_step and not values:
+        parser.error('--single-step specified, '
+                     'but no single step command provided.')
+      setattr(namespace, self.dest, values)
+
+  step_group = group.add_mutually_exclusive_group(required=True)
+  # TODO(jbudorick): Revise --single-step to use argparse.REMAINDER.
+  # This requires removing "--" from client calls.
+  step_group.add_argument(
+      '--single-step', action='store_true',
+      help='Execute the given command with retries, but only print the result '
+           'for the "most successful" round.')
+  step_group.add_argument(
+      '--steps',
+      help='JSON file containing the list of commands to run.')
+  step_group.add_argument(
+      '--print-step',
+      help='The name of a previously executed perf step to print.')
+
+  group.add_argument(
+      '--output-json-list',
+      help='Write a simple list of names from --steps into the given file.')
+  group.add_argument(
+      '--collect-chartjson-data',
+      action='store_true',
+      help='Cache the chartjson output from each step for later use.')
+  group.add_argument(
+      '--output-chartjson-data',
+      default='',
+      help='Write out chartjson into the given file.')
+  group.add_argument(
+      '--flaky-steps',
+      help=('A JSON file containing steps that are flaky '
+            'and will have its exit code ignored.'))
+  group.add_argument(
+      '--no-timeout', action='store_true',
+      help=('Do not impose a timeout. Each perf step is responsible for '
+            'implementing the timeout logic.'))
+  group.add_argument(
+      '-f', '--test-filter',
+      help=('Test filter (will match against the names listed in --steps).'))
+  group.add_argument(
+      '--dry-run', action='store_true',
+      help='Just print the steps without executing.')
+  group.add_argument('single_step_command', nargs='*', action=SingleStepAction,
+                     help='If --single-step is specified, the command to run.')
+  AddCommonOptions(parser)
+  AddDeviceOptions(parser)
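SingleStepAction deserves a close look: the positional single_step_command is always matched (nargs='*'), so its custom action runs on every parse and can cross-check the --single-step flag. The same class, exercised standalone:

import argparse

class SingleStepAction(argparse.Action):
  def __call__(self, parser, namespace, values, option_string=None):
    # --single-step and the positional command must appear together.
    if values and not namespace.single_step:
      parser.error('single step command provided, '
                   'but --single-step not specified.')
    elif namespace.single_step and not values:
      parser.error('--single-step specified, '
                   'but no single step command provided.')
    setattr(namespace, self.dest, values)

parser = argparse.ArgumentParser()
parser.add_argument('--single-step', action='store_true')
parser.add_argument('single_step_command', nargs='*', action=SingleStepAction)

args = parser.parse_args(['--single-step', '--', 'echo', 'ok'])
print(args.single_step_command)  # ['echo', 'ok']

This works because namespace.single_step already holds either its default or the parsed flag value by the time the trailing positional's action fires.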


-def ProcessPerfTestOptions(options, args, error_func):
+def ProcessPerfTestOptions(args):
   """Processes all perf test options.

   Args:
-    options: optparse.Options object.
-    error_func: Function to call with the error message in case of an error.
+    args: argparse.Namespace object.

   Returns:
     A PerfOptions named tuple which contains all options relevant to
     perf tests.
   """
-  # Only one of steps, print_step or single_step must be provided.
-  count = len(filter(None,
-                     [options.steps, options.print_step, options.single_step]))
-  if count != 1:
-    error_func('Please specify one of: --steps, --print-step, --single-step.')
-  single_step = None
-  if options.single_step:
-    single_step = ' '.join(args[2:])
+  # TODO(jbudorick): Move single_step handling down into the perf tests.
+  if args.single_step:
+    args.single_step = ' '.join(args.single_step_command)
+  # TODO(jbudorick): Get rid of PerfOptions.
   return perf_test_options.PerfOptions(
-      options.steps, options.flaky_steps, options.output_json_list,
-      options.print_step, options.no_timeout, options.test_filter,
-      options.dry_run, single_step)
+      args.steps, args.flaky_steps, args.output_json_list,
+      args.print_step, args.no_timeout, args.test_filter,
+      args.dry_run, args.single_step, args.collect_chartjson_data,
+      args.output_chartjson_data)


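The argparse.REMAINDER revision proposed in the TODOs would make --single-step itself swallow the rest of the command line, letting callers drop the '--' separator. A sketch of what that might look like (the command is made up):

import argparse

parser = argparse.ArgumentParser()
parser.add_argument('--single-step', nargs=argparse.REMAINDER)

args = parser.parse_args(['--single-step', 'out/Release/foo_test', '--bar'])
print(args.single_step)  # ['out/Release/foo_test', '--bar']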
-def AddPythonTestOptions(option_parser):
-  option_parser.add_option('-s', '--suite', dest='suite_name',
-                           help=('Name of the test suite to run'
-                                 '(use -s help to list them).'))
-  AddCommonOptions(option_parser)
-
-
-def ProcessPythonTestOptions(options, error_func):
-  if options.suite_name not in constants.PYTHON_UNIT_TEST_SUITES:
-    available = ('Available test suites: [%s]' %
-                 ', '.join(constants.PYTHON_UNIT_TEST_SUITES.iterkeys()))
-    if options.suite_name == 'help':
-      print available
-    else:
-      error_func('"%s" is not a valid suite. %s' %
-                 (options.suite_name, available))
+def AddPythonTestOptions(parser):
+  group = parser.add_argument_group('Python Test Options')
+  group.add_argument(
+      '-s', '--suite', dest='suite_name', metavar='SUITE_NAME',
+      choices=constants.PYTHON_UNIT_TEST_SUITES.keys(),
+      help='Name of the test suite to run.')
+  AddCommonOptions(parser)


-def _RunGTests(options, devices):
+def _RunGTests(args, devices):
   """Subcommand of RunTestsCommands which runs gtests."""
-  ProcessGTestOptions(options)
-
   exit_code = 0
-  for suite_name in options.suite_name:
-    # TODO(gkanwar): Move this into ProcessGTestOptions once we require -s for
-    # the gtest command.
+  for suite_name in args.suite_name:
+    # TODO(jbudorick): Either deprecate multi-suite or move its handling down
+    # into the gtest code.
     gtest_options = gtest_test_options.GTestOptions(
-        options.tool,
-        options.cleanup_test_files,
-        options.test_filter,
-        options.run_disabled,
-        options.test_arguments,
-        options.timeout,
-        options.isolate_file_path,
+        args.tool,
+        args.cleanup_test_files,
+        args.test_filter,
+        args.run_disabled,
+        args.test_arguments,
+        args.timeout,
+        args.isolate_file_path,
         suite_name)
     runner_factory, tests = gtest_setup.Setup(gtest_options, devices)

     results, test_exit_code = test_dispatcher.RunTests(
         tests, runner_factory, devices, shard=True, test_timeout=None,
-        num_retries=options.num_retries)
+        num_retries=args.num_retries)

     if test_exit_code and exit_code != constants.ERROR_EXIT_CODE:
       exit_code = test_exit_code

     report_results.LogFull(
         results=results,
         test_type='Unit test',
         test_package=suite_name,
-        flakiness_server=options.flakiness_dashboard_server)
+        flakiness_server=args.flakiness_dashboard_server)
+
+    if args.json_results_file:
+      json_results.GenerateJsonResultsFile(results, args.json_results_file)

   if os.path.isdir(constants.ISOLATE_DEPS_DIR):
     shutil.rmtree(constants.ISOLATE_DEPS_DIR)

   return exit_code


-def _RunLinkerTests(options, devices):
+def _RunLinkerTests(args, devices):
   """Subcommand of RunTestsCommands which runs linker tests."""
-  runner_factory, tests = linker_setup.Setup(options, devices)
+  runner_factory, tests = linker_setup.Setup(args, devices)

   results, exit_code = test_dispatcher.RunTests(
       tests, runner_factory, devices, shard=True, test_timeout=60,
-      num_retries=options.num_retries)
+      num_retries=args.num_retries)

   report_results.LogFull(
       results=results,
       test_type='Linker test',
       test_package='ChromiumLinkerTest')

+  if args.json_results_file:
+    json_results.GenerateJsonResultsFile(results, args.json_results_file)
+
   return exit_code


-def _RunInstrumentationTests(options, error_func, devices):
+def _RunInstrumentationTests(args, devices):
   """Subcommand of RunTestsCommands which runs instrumentation tests."""
-  instrumentation_options = ProcessInstrumentationOptions(options, error_func)
+  logging.info('_RunInstrumentationTests(%s, %s)' % (str(args), str(devices)))

-  if len(devices) > 1 and options.wait_for_debugger:
+  instrumentation_options = ProcessInstrumentationOptions(args)
+
+  if len(devices) > 1 and args.wait_for_debugger:
     logging.warning('Debugger can not be sharded, using first available device')
     devices = devices[:1]

   results = base_test_result.TestRunResults()
   exit_code = 0

-  if options.run_java_tests:
+  if args.run_java_tests:
     runner_factory, tests = instrumentation_setup.Setup(
         instrumentation_options, devices)

     test_results, exit_code = test_dispatcher.RunTests(
         tests, runner_factory, devices, shard=True, test_timeout=None,
-        num_retries=options.num_retries)
+        num_retries=args.num_retries)

     results.AddTestRunResults(test_results)

-  if options.run_python_tests:
+  if args.run_python_tests:
     runner_factory, tests = host_driven_setup.InstrumentationSetup(
-        options.host_driven_root, options.official_build,
+        args.host_driven_root, args.official_build,
         instrumentation_options)

     if tests:
       test_results, test_exit_code = test_dispatcher.RunTests(
           tests, runner_factory, devices, shard=True, test_timeout=None,
-          num_retries=options.num_retries)
+          num_retries=args.num_retries)

       results.AddTestRunResults(test_results)

       # Only allow exit code escalation
       if test_exit_code and exit_code != constants.ERROR_EXIT_CODE:
         exit_code = test_exit_code

-  if options.device_flags:
-    options.device_flags = os.path.join(constants.DIR_SOURCE_ROOT,
-                                        options.device_flags)
+  if args.device_flags:
+    args.device_flags = os.path.join(constants.DIR_SOURCE_ROOT,
+                                     args.device_flags)

   report_results.LogFull(
       results=results,
       test_type='Instrumentation',
-      test_package=os.path.basename(options.test_apk),
-      annotation=options.annotations,
-      flakiness_server=options.flakiness_dashboard_server)
+      test_package=os.path.basename(args.test_apk),
+      annotation=args.annotations,
+      flakiness_server=args.flakiness_dashboard_server)
+
+  if args.json_results_file:
+    json_results.GenerateJsonResultsFile(results, args.json_results_file)

   return exit_code


-def _RunUIAutomatorTests(options, error_func, devices):
+def _RunUIAutomatorTests(args, devices):
   """Subcommand of RunTestsCommands which runs uiautomator tests."""
-  uiautomator_options = ProcessUIAutomatorOptions(options, error_func)
+  uiautomator_options = ProcessUIAutomatorOptions(args)

   runner_factory, tests = uiautomator_setup.Setup(uiautomator_options)

   results, exit_code = test_dispatcher.RunTests(
       tests, runner_factory, devices, shard=True, test_timeout=None,
-      num_retries=options.num_retries)
+      num_retries=args.num_retries)

   report_results.LogFull(
       results=results,
       test_type='UIAutomator',
-      test_package=os.path.basename(options.test_jar),
-      annotation=options.annotations,
-      flakiness_server=options.flakiness_dashboard_server)
+      test_package=os.path.basename(args.test_jar),
+      annotation=args.annotations,
+      flakiness_server=args.flakiness_dashboard_server)
+
+  if args.json_results_file:
+    json_results.GenerateJsonResultsFile(results, args.json_results_file)

   return exit_code


-def _RunJUnitTests(options, error_func):
+def _RunJUnitTests(args):
   """Subcommand of RunTestsCommand which runs junit tests."""
-  junit_options = ProcessJUnitTestOptions(options, error_func)
-  runner_factory, tests = junit_setup.Setup(junit_options)
+  runner_factory, tests = junit_setup.Setup(args)
   _, exit_code = junit_dispatcher.RunTests(tests, runner_factory)
-
   return exit_code


-def _RunMonkeyTests(options, error_func, devices):
+def _RunMonkeyTests(args, devices):
   """Subcommand of RunTestsCommands which runs monkey tests."""
-  monkey_options = ProcessMonkeyTestOptions(options, error_func)
+  monkey_options = ProcessMonkeyTestOptions(args)

   runner_factory, tests = monkey_setup.Setup(monkey_options)

   results, exit_code = test_dispatcher.RunTests(
       tests, runner_factory, devices, shard=False, test_timeout=None,
-      num_retries=options.num_retries)
+      num_retries=args.num_retries)

   report_results.LogFull(
       results=results,
       test_type='Monkey',
       test_package='Monkey')

+  if args.json_results_file:
+    json_results.GenerateJsonResultsFile(results, args.json_results_file)
+
   return exit_code


-def _RunPerfTests(options, args, error_func):
+def _RunPerfTests(args):
   """Subcommand of RunTestsCommands which runs perf tests."""
-  perf_options = ProcessPerfTestOptions(options, args, error_func)
+  perf_options = ProcessPerfTestOptions(args)

   # Just save a simple json with a list of test names.
   if perf_options.output_json_list:
     return perf_test_runner.OutputJsonList(
         perf_options.steps, perf_options.output_json_list)

+  if perf_options.output_chartjson_data:
+    return perf_test_runner.OutputChartjson(
+        perf_options.print_step, perf_options.output_chartjson_data)
+
   # Just print the results from a single previously executed step.
   if perf_options.print_step:
     return perf_test_runner.PrintTestOutput(perf_options.print_step)

   runner_factory, tests, devices = perf_setup.Setup(perf_options)

   # shard=False means that each device will get the full list of tests
   # and then each one will decide their own affinity.
   # shard=True means each device will pop the next test available from a queue,
   # which increases throughput but have no affinity.
   results, _ = test_dispatcher.RunTests(
       tests, runner_factory, devices, shard=False, test_timeout=None,
-      num_retries=options.num_retries)
+      num_retries=args.num_retries)

   report_results.LogFull(
       results=results,
       test_type='Perf',
       test_package='Perf')

+  if args.json_results_file:
+    json_results.GenerateJsonResultsFile(results, args.json_results_file)
+
   if perf_options.single_step:
     return perf_test_runner.PrintTestOutput('single_step')

   perf_test_runner.PrintSummary(tests)

   # Always return 0 on the sharding stage. Individual tests exit_code
   # will be returned on the print_step stage.
   return 0


-def _RunPythonTests(options, error_func):
+def _RunPythonTests(args):
   """Subcommand of RunTestsCommand which runs python unit tests."""
-  ProcessPythonTestOptions(options, error_func)
-
-  suite_vars = constants.PYTHON_UNIT_TEST_SUITES[options.suite_name]
+  suite_vars = constants.PYTHON_UNIT_TEST_SUITES[args.suite_name]
   suite_path = suite_vars['path']
   suite_test_modules = suite_vars['test_modules']

   sys.path = [suite_path] + sys.path
   try:
     suite = unittest.TestSuite()
     suite.addTests(unittest.defaultTestLoader.loadTestsFromName(m)
                    for m in suite_test_modules)
-    runner = unittest.TextTestRunner(verbosity=1+options.verbose_count)
+    runner = unittest.TextTestRunner(verbosity=1+args.verbose_count)
     return 0 if runner.run(suite).wasSuccessful() else 1
   finally:
     sys.path = sys.path[1:]


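_RunPythonTests temporarily prepends the suite path and relies on the finally block to restore sys.path. The same idea reads naturally as a context manager (an equivalent alternative, not what the file does):

import contextlib
import sys

@contextlib.contextmanager
def prepended_sys_path(path):
  sys.path = [path] + sys.path  # same prepend as _RunPythonTests
  try:
    yield
  finally:
    sys.path = sys.path[1:]  # same restore as the finally block

with prepended_sys_path('/tmp/suite'):  # hypothetical suite path
  pass  # imports here resolve against /tmp/suite first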
818 def _GetAttachedDevices(test_device=None): 773 def _GetAttachedDevices(test_device=None):
819 """Get all attached devices. 774 """Get all attached devices.
820 775
821 Args: 776 Args:
822 test_device: Name of a specific device to use. 777 test_device: Name of a specific device to use.
823 778
824 Returns: 779 Returns:
825 A list of attached devices. 780 A list of attached devices.
826 """ 781 """
827 attached_devices = [] 782 attached_devices = []
828 783
829 attached_devices = android_commands.GetAttachedDevices() 784 attached_devices = android_commands.GetAttachedDevices()
830 if test_device: 785 if test_device:
831 assert test_device in attached_devices, ( 786 assert test_device in attached_devices, (
832 'Did not find device %s among attached devices. Attached devices: %s' 787 'Did not find device %s among attached devices. Attached devices: %s'
833 % (test_device, ', '.join(attached_devices))) 788 % (test_device, ', '.join(attached_devices)))
834 attached_devices = [test_device] 789 attached_devices = [test_device]
835 790
836 assert attached_devices, 'No devices attached.' 791 assert attached_devices, 'No devices attached.'
837 792
838 return sorted(attached_devices) 793 return sorted(attached_devices)
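[Reviewer note] Worth flagging for callers: a missing device surfaces as an AssertionError rather than a clean exit. Illustrative usage (serial invented):

    all_devices = _GetAttachedDevices()               # every attached serial, sorted
    one_device = _GetAttachedDevices('0123456789ab')  # hypothetical serial: returns
                                                      # ['0123456789ab'], or raises
                                                      # AssertionError if not attached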
839 794
840 795
841 def RunTestsCommand(command, options, args, option_parser): 796 def RunTestsCommand(args, parser):
842 """Checks test type and dispatches to the appropriate function. 797 """Checks test type and dispatches to the appropriate function.
843 798
844 Args: 799 Args:
845 command: String indicating the command that was received to trigger 800 args: argparse.Namespace object.
846 this function. 801 parser: argparse.ArgumentParser object.
847 options: optparse options dictionary.
848 args: List of extra args from optparse.
849 option_parser: optparse.OptionParser object.
850 802
851 Returns: 803 Returns:
852 Integer indicating exit code. 804 Integer indicating exit code.
853 805
854 Raises: 806 Raises:
855 Exception: Unknown command name passed in, or an exception from an 807 Exception: Unknown command name passed in, or an exception from an
856 individual test runner. 808 individual test runner.
857 """ 809 """
810 command = args.command
858 811
859 # Check for extra arguments 812 ProcessCommonOptions(args)
860 if len(args) > 2 and command != 'perf':
861 option_parser.error('Unrecognized arguments: %s' % (' '.join(args[2:])))
862 return constants.ERROR_EXIT_CODE
863 if command == 'perf':
864 if ((options.single_step and len(args) <= 2) or
865 (not options.single_step and len(args) > 2)):
866 option_parser.error('Unrecognized arguments: %s' % (' '.join(args)))
867 return constants.ERROR_EXIT_CODE
868 813
869 ProcessCommonOptions(options, option_parser.error) 814 if args.enable_platform_mode:
870 815 return RunTestsInPlatformMode(args, parser)
871 if options.enable_platform_mode:
872 return RunTestsInPlatformMode(command, options, option_parser)
873 816
874 if command in constants.LOCAL_MACHINE_TESTS: 817 if command in constants.LOCAL_MACHINE_TESTS:
875 devices = [] 818 devices = []
876 else: 819 else:
877 devices = _GetAttachedDevices(options.test_device) 820 devices = _GetAttachedDevices(args.test_device)
878 821
879 forwarder.Forwarder.RemoveHostLog() 822 forwarder.Forwarder.RemoveHostLog()
880 if not ports.ResetTestServerPortAllocation(): 823 if not ports.ResetTestServerPortAllocation():
881 raise Exception('Failed to reset test server port.') 824 raise Exception('Failed to reset test server port.')
882 825
883 if command == 'gtest': 826 if command == 'gtest':
884 return _RunGTests(options, devices) 827 return _RunGTests(args, devices)
885 elif command == 'linker': 828 elif command == 'linker':
886 return _RunLinkerTests(options, devices) 829 return _RunLinkerTests(args, devices)
887 elif command == 'instrumentation': 830 elif command == 'instrumentation':
888 return _RunInstrumentationTests(options, option_parser.error, devices) 831 return _RunInstrumentationTests(args, devices)
889 elif command == 'uiautomator': 832 elif command == 'uiautomator':
890 return _RunUIAutomatorTests(options, option_parser.error, devices) 833 return _RunUIAutomatorTests(args, devices)
891 elif command == 'junit': 834 elif command == 'junit':
892 return _RunJUnitTests(options, option_parser.error) 835 return _RunJUnitTests(args)
893 elif command == 'monkey': 836 elif command == 'monkey':
894 return _RunMonkeyTests(options, option_parser.error, devices) 837 return _RunMonkeyTests(args, devices)
895 elif command == 'perf': 838 elif command == 'perf':
896 return _RunPerfTests(options, args, option_parser.error) 839 return _RunPerfTests(args)
897 elif command == 'python': 840 elif command == 'python':
898 return _RunPythonTests(options, option_parser.error) 841 return _RunPythonTests(args)
899 else: 842 else:
900 raise Exception('Unknown test type.') 843 raise Exception('Unknown test type.')
901 844
902 845
903 _SUPPORTED_IN_PLATFORM_MODE = [ 846 _SUPPORTED_IN_PLATFORM_MODE = [
904 # TODO(jbudorick): Add support for more test types. 847 # TODO(jbudorick): Add support for more test types.
905 'gtest', 848 'gtest',
906 ] 849 ]
907 850
908 851
909 def RunTestsInPlatformMode(command, options, option_parser): 852 def RunTestsInPlatformMode(args, parser):
910 853
911 if command not in _SUPPORTED_IN_PLATFORM_MODE: 854 if args.command not in _SUPPORTED_IN_PLATFORM_MODE:
912 option_parser.error('%s is not yet supported in platform mode' % command) 855 parser.error('%s is not yet supported in platform mode' % args.command)
913 856
914 with environment_factory.CreateEnvironment( 857 with environment_factory.CreateEnvironment(args, parser.error) as env:
915 command, options, option_parser.error) as env: 858 with test_instance_factory.CreateTestInstance(args, parser.error) as test:
916 with test_instance_factory.CreateTestInstance(
917 command, options, option_parser.error) as test:
918 with test_run_factory.CreateTestRun( 859 with test_run_factory.CreateTestRun(
919 options, env, test, option_parser.error) as test_run: 860 args, env, test, parser.error) as test_run:
920 results = test_run.RunTests() 861 results = test_run.RunTests()
921 862
922 report_results.LogFull( 863 report_results.LogFull(
923 results=results, 864 results=results,
924 test_type=test.TestType(), 865 test_type=test.TestType(),
925 test_package=test_run.TestPackage(), 866 test_package=test_run.TestPackage(),
926 annotation=options.annotations, 867 annotation=args.annotations,
927 flakiness_server=options.flakiness_dashboard_server) 868 flakiness_server=args.flakiness_dashboard_server)
869
870 if args.json_results_file:
871 json_results.GenerateJsonResultsFile(
872 results, args.json_results_file)
928 873
929 return results 874 return results
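[Reviewer note] Platform mode leans on the factories returning context managers so teardown runs even when RunTests raises. A sketch of the contract each factory product must satisfy (class name invented; not the real implementation):

    class ExampleEnvironment(object):
      """Illustrative only: the protocol CreateEnvironment's product must meet."""

      def __enter__(self):
        # Acquire devices, start any servers, etc.
        return self

      def __exit__(self, exc_type, exc_value, traceback):
        # Tear down unconditionally; returning False propagates exceptions
        # raised inside the with-block (e.g. from RunTests).
        return False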
930 875
931 876
932 def HelpCommand(command, _options, args, option_parser): 877 CommandConfigTuple = collections.namedtuple(
933 """Display help for a certain command, or overall help. 878 'CommandConfigTuple',
934 879 ['add_options_func', 'help_txt'])
935 Args:
936 command: String indicating the command that was received to trigger
937 this function.
938 options: optparse options dictionary. Unused.
939 args: List of extra args from optparse.
940 option_parser: optparse.OptionParser object.
941
942 Returns:
943 Integer indicated exit code.
944 """
945 # If we don't have any args, display overall help
946 if len(args) < 3:
947 option_parser.print_help()
948 return 0
949 # If we have too many args, print an error
950 if len(args) > 3:
951 option_parser.error('Unrecognized arguments: %s' % (' '.join(args[3:])))
952 return constants.ERROR_EXIT_CODE
953
954 command = args[2]
955
956 if command not in VALID_COMMANDS:
957 option_parser.error('Unrecognized command.')
958
959 # Treat the help command as a special case. We don't care about showing a
960 # specific help page for itself.
961 if command == 'help':
962 option_parser.print_help()
963 return 0
964
965 VALID_COMMANDS[command].add_options_func(option_parser)
966 option_parser.usage = '%prog ' + command + ' [options]'
967 option_parser.commands_dict = {}
968 option_parser.print_help()
969
970 return 0
971
972
973 # Define a named tuple for the values in the VALID_COMMANDS dictionary so the
974 # syntax is a bit prettier. The tuple is two functions: (add options, run
975 # command).
976 CommandFunctionTuple = collections.namedtuple(
977 'CommandFunctionTuple', ['add_options_func', 'run_command_func'])
978 VALID_COMMANDS = { 880 VALID_COMMANDS = {
979 'gtest': CommandFunctionTuple(AddGTestOptions, RunTestsCommand), 881 'gtest': CommandConfigTuple(
980 'instrumentation': CommandFunctionTuple( 882 AddGTestOptions,
981 AddInstrumentationTestOptions, RunTestsCommand), 883 'googletest-based C++ tests'),
982 'uiautomator': CommandFunctionTuple( 884 'instrumentation': CommandConfigTuple(
983 AddUIAutomatorTestOptions, RunTestsCommand), 885 AddInstrumentationTestOptions,
984 'junit': CommandFunctionTuple( 886 'InstrumentationTestCase-based Java tests'),
985 AddJUnitTestOptions, RunTestsCommand), 887 'uiautomator': CommandConfigTuple(
986 'monkey': CommandFunctionTuple( 888 AddUIAutomatorTestOptions,
987 AddMonkeyTestOptions, RunTestsCommand), 889 "Tests that run via Android's uiautomator command"),
988 'perf': CommandFunctionTuple( 890 'junit': CommandConfigTuple(
989 AddPerfTestOptions, RunTestsCommand), 891 AddJUnitTestOptions,
990 'python': CommandFunctionTuple( 892 'JUnit4-based Java tests'),
991 AddPythonTestOptions, RunTestsCommand), 893 'monkey': CommandConfigTuple(
992 'linker': CommandFunctionTuple( 894 AddMonkeyTestOptions,
993 AddLinkerTestOptions, RunTestsCommand), 895 "Tests based on Android's monkey"),
994 'help': CommandFunctionTuple(lambda option_parser: None, HelpCommand) 896 'perf': CommandConfigTuple(
995 } 897 AddPerfTestOptions,
898 'Performance tests'),
899 'python': CommandConfigTuple(
900 AddPythonTestOptions,
901 'Python tests based on unittest.TestCase'),
902 'linker': CommandConfigTuple(
903 AddLinkerTestOptions,
904 'Linker tests'),
905 }
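[Reviewer note] With argparse, each add_options_func now receives the subparser for its command rather than a shared OptionParser. A hypothetical function following the expected signature (function and flag names invented):

    def AddExampleTestOptions(parser):
      # 'parser' is the argparse subparser created in main() below; anything
      # registered on it surfaces as an attribute of the parsed args object.
      parser.add_argument('--example-flag', action='store_true',
                          help='Hypothetical flag for illustration.')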
996 906
997 907
998 def DumpThreadStacks(_signal, _frame): 908 def DumpThreadStacks(_signal, _frame):
999 for thread in threading.enumerate(): 909 for thread in threading.enumerate():
1000 reraiser_thread.LogThreadStack(thread) 910 reraiser_thread.LogThreadStack(thread)
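[Reviewer note] Handy for debugging hangs: once main() installs this handler, sending SIGUSR1 to the running process (kill -USR1 <pid>) logs every live thread's stack via reraiser_thread without terminating the run.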
1001 911
1002 912
1003 def main(): 913 def main():
1004 signal.signal(signal.SIGUSR1, DumpThreadStacks) 914 signal.signal(signal.SIGUSR1, DumpThreadStacks)
1005 option_parser = command_option_parser.CommandOptionParser( 915
1006 commands_dict=VALID_COMMANDS) 916 parser = argparse.ArgumentParser()
1007 return command_option_parser.ParseAndExecute(option_parser) 917 command_parsers = parser.add_subparsers(title='test types',
918 dest='command')
919
920 for test_type, config in sorted(VALID_COMMANDS.iteritems(),
921 key=lambda x: x[0]):
922 subparser = command_parsers.add_parser(
923 test_type, usage='%(prog)s [options]', help=config.help_txt)
924 config.add_options_func(subparser)
925
926 args = parser.parse_args()
927 return RunTestsCommand(args, parser)
1008 928
1009 929
1010 if __name__ == '__main__': 930 if __name__ == '__main__':
1011 sys.exit(main()) 931 sys.exit(main())
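[Reviewer note] After this change the entry point is a plain argparse parser with one subparser per test type, so discovery works the usual way (both lines below rely only on argparse's built-in --help):

    build/android/test_runner.py --help          # lists the test types from VALID_COMMANDS
    build/android/test_runner.py gtest --help    # shows whatever AddGTestOptions registers

Since add_subparsers is given dest='command', the chosen test type arrives in RunTestsCommand as args.command.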