OLD | NEW |
---|---|
1 #!/usr/bin/env python | 1 #!/usr/bin/env python |
2 # | 2 # |
3 # Copyright 2013 The Chromium Authors. All rights reserved. | 3 # Copyright 2013 The Chromium Authors. All rights reserved. |
4 # Use of this source code is governed by a BSD-style license that can be | 4 # Use of this source code is governed by a BSD-style license that can be |
5 # found in the LICENSE file. | 5 # found in the LICENSE file. |
6 | 6 |
7 """Runs all types of tests from one unified interface. | 7 """Runs all types of tests from one unified interface. |
8 | 8 |
9 TODO(gkanwar): | 9 TODO(gkanwar): |
10 * Add options to run Monkey tests. | 10 * Add options to run Monkey tests. |
11 """ | 11 """ |
12 | 12 |
13 import collections | 13 import collections |
14 import optparse | 14 import optparse |
15 import os | 15 import os |
16 import sys | 16 import sys |
17 | 17 |
18 from pylib import cmd_helper | 18 from pylib import cmd_helper |
19 from pylib import constants | 19 from pylib import constants |
20 from pylib import ports | 20 from pylib import ports |
21 from pylib.base import base_test_result | 21 from pylib.base import base_test_result |
22 from pylib.browsertests import dispatch as browsertests_dispatch | 22 from pylib.base import test_dispatcher |
23 from pylib.gtest import dispatch as gtest_dispatch | 23 from pylib.browsertests import setup as browsertests_setup |
24 from pylib.gtest import setup as gtest_setup | |
25 from pylib.gtest import gtest_config | |
24 from pylib.host_driven import run_python_tests as python_dispatch | 26 from pylib.host_driven import run_python_tests as python_dispatch |
25 from pylib.instrumentation import dispatch as instrumentation_dispatch | 27 from pylib.instrumentation import setup as instrumentation_setup |
26 from pylib.uiautomator import dispatch as uiautomator_dispatch | 28 from pylib.uiautomator import setup as uiautomator_setup |
27 from pylib.utils import emulator, report_results, run_tests_helper | 29 from pylib.utils import report_results |
30 from pylib.utils import run_tests_helper | |
28 | 31 |
29 | 32 |
30 _SDK_OUT_DIR = os.path.join(constants.DIR_SOURCE_ROOT, 'out') | 33 _SDK_OUT_DIR = os.path.join(constants.DIR_SOURCE_ROOT, 'out') |
31 | 34 |
32 | 35 |
33 def AddBuildTypeOption(option_parser): | 36 def AddBuildTypeOption(option_parser): |
34 """Adds the build type option to |option_parser|.""" | 37 """Adds the build type option to |option_parser|.""" |
35 default_build_type = 'Debug' | 38 default_build_type = 'Debug' |
36 if 'BUILDTYPE' in os.environ: | 39 if 'BUILDTYPE' in os.environ: |
37 default_build_type = os.environ['BUILDTYPE'] | 40 default_build_type = os.environ['BUILDTYPE'] |
38 option_parser.add_option('--debug', action='store_const', const='Debug', | 41 option_parser.add_option('--debug', action='store_const', const='Debug', |
39 dest='build_type', default=default_build_type, | 42 dest='build_type', default=default_build_type, |
40 help=('If set, run test suites under out/Debug. ' | 43 help=('If set, run test suites under out/Debug. ' |
41 'Default is env var BUILDTYPE or Debug.')) | 44 'Default is env var BUILDTYPE or Debug.')) |
42 option_parser.add_option('--release', action='store_const', | 45 option_parser.add_option('--release', action='store_const', |
43 const='Release', dest='build_type', | 46 const='Release', dest='build_type', |
44 help=('If set, run test suites under out/Release.' | 47 help=('If set, run test suites under out/Release.' |
45 ' Default is env var BUILDTYPE or Debug.')) | 48 ' Default is env var BUILDTYPE or Debug.')) |
46 | 49 |
47 | 50 |
48 def AddEmulatorOptions(option_parser): | |
49 """Adds all emulator-related options to |option_parser|.""" | |
50 | |
51 # TODO(gkanwar): Figure out what we're doing with the emulator setup | |
52 # and determine whether these options should be deprecated/removed. | |
53 option_parser.add_option('-e', '--emulator', dest='use_emulator', | |
54 action='store_true', | |
55 help='Run tests in a new instance of emulator.') | |
56 option_parser.add_option('-n', '--emulator-count', | |
57 type='int', default=1, | |
58 help=('Number of emulators to launch for ' | |
59 'running the tests.')) | |
60 option_parser.add_option('--abi', default='armeabi-v7a', | |
61 help='Platform of emulators to launch.') | |
62 | |
63 | |
64 def ProcessEmulatorOptions(options): | |
65 """Processes emulator options.""" | |
66 if options.use_emulator: | |
67 emulator.DeleteAllTempAVDs() | |
68 | |
69 | |
70 def AddCommonOptions(option_parser): | 51 def AddCommonOptions(option_parser): |
71 """Adds all common options to |option_parser|.""" | 52 """Adds all common options to |option_parser|.""" |
72 | 53 |
73 AddBuildTypeOption(option_parser) | 54 AddBuildTypeOption(option_parser) |
74 | 55 |
75 option_parser.add_option('--out-directory', dest='out_directory', | 56 option_parser.add_option('--out-directory', dest='out_directory', |
76 help=('Path to the out/ directory, irrespective of ' | 57 help=('Path to the out/ directory, irrespective of ' |
77 'the build type. Only for non-Chromium uses.')) | 58 'the build type. Only for non-Chromium uses.')) |
78 option_parser.add_option('-c', dest='cleanup_test_files', | 59 option_parser.add_option('-c', dest='cleanup_test_files', |
79 help='Cleanup test files on the device after run', | 60 help='Cleanup test files on the device after run', |
(...skipping 24 matching lines...) |
104 help=('Address of the server that is hosting the ' | 85 help=('Address of the server that is hosting the ' |
105 'Chrome for Android flakiness dashboard.')) | 86 'Chrome for Android flakiness dashboard.')) |
106 option_parser.add_option('--skip-deps-push', dest='push_deps', | 87 option_parser.add_option('--skip-deps-push', dest='push_deps', |
107 action='store_false', default=True, | 88 action='store_false', default=True, |
108 help=('Do not push dependencies to the device. ' | 89 help=('Do not push dependencies to the device. ' |
109 'Use this at own risk for speeding up test ' | 90 'Use this at own risk for speeding up test ' |
110 'execution on local machine.')) | 91 'execution on local machine.')) |
111 option_parser.add_option('-d', '--device', dest='test_device', | 92 option_parser.add_option('-d', '--device', dest='test_device', |
112 help=('Target device for the test suite ' | 93 help=('Target device for the test suite ' |
113 'to run on.')) | 94 'to run on.')) |
95 option_parser.add_option('-t', dest='timeout', | |
96 help='Timeout to wait for each test', | |
97 type='int', | |
98 default=60) | |
114 | 99 |
115 | 100 |
116 def ProcessCommonOptions(options): | 101 def ProcessCommonOptions(options): |
117 """Processes and handles all common options.""" | 102 """Processes and handles all common options.""" |
118 if options.out_directory: | 103 if options.out_directory: |
119 cmd_helper.OutDirectory.set(options.out_directory) | 104 cmd_helper.OutDirectory.set(options.out_directory) |
120 run_tests_helper.SetLogLevel(options.verbose_count) | 105 run_tests_helper.SetLogLevel(options.verbose_count) |
121 | 106 |
122 | 107 |
123 def AddCoreGTestOptions(option_parser, default_timeout=60): | 108 def AddCoreGTestOptions(option_parser): |
124 """Add options specific to the gtest framework to |option_parser|.""" | 109 """Add options specific to the gtest framework to |option_parser|.""" |
125 | 110 |
126 # TODO(gkanwar): Consolidate and clean up test filtering for gtests and | 111 # TODO(gkanwar): Consolidate and clean up test filtering for gtests and |
127 # content_browsertests. | 112 # content_browsertests. |
128 option_parser.add_option('--gtest_filter', dest='test_filter', | 113 option_parser.add_option('--gtest_filter', dest='gtest_filter', |
129 help='Filter GTests by name.') | 114 help='Filter GTests by name.') |
130 option_parser.add_option('-a', '--test_arguments', dest='test_arguments', | 115 option_parser.add_option('-a', '--test_arguments', dest='test_arguments', |
131 help='Additional arguments to pass to the test.') | 116 help='Additional arguments to pass to the test.') |
132 # TODO(gkanwar): Most likely deprecate/remove this option once we've pinned | |
133 # down what we're doing with the emulator setup. | |
134 option_parser.add_option('-x', '--xvfb', dest='use_xvfb', | |
135 action='store_true', | |
136 help='Use Xvfb around tests (ignored if not Linux).') | |
137 # TODO(gkanwar): Possibly deprecate this flag. Waiting on word from Peter | 117 # TODO(gkanwar): Possibly deprecate this flag. Waiting on word from Peter |
138 # Beverloo. | 118 # Beverloo. |
139 option_parser.add_option('--webkit', action='store_true', | 119 option_parser.add_option('--webkit', action='store_true', |
140 help='Run the tests from a WebKit checkout.') | 120 help='Run the tests from a WebKit checkout.') |
141 option_parser.add_option('--exe', action='store_true', | 121 option_parser.add_option('--exe', action='store_true', |
142 help='If set, use the exe test runner instead of ' | 122 help='If set, use the exe test runner instead of ' |
143 'the APK.') | 123 'the APK.') |
144 option_parser.add_option('-t', dest='timeout', | |
145 help='Timeout to wait for each test', | |
146 type='int', | |
147 default=default_timeout) | |
148 | 124 |
149 | 125 |
150 def AddContentBrowserTestOptions(option_parser): | 126 def AddContentBrowserTestOptions(option_parser): |
151 """Adds Content Browser test options to |option_parser|.""" | 127 """Adds Content Browser test options to |option_parser|.""" |
152 | 128 |
153 option_parser.usage = '%prog content_browsertests [options]' | 129 option_parser.usage = '%prog content_browsertests [options]' |
154 option_parser.command_list = [] | 130 option_parser.command_list = [] |
155 option_parser.example = '%prog content_browsertests' | 131 option_parser.example = '%prog content_browsertests' |
156 | 132 |
157 AddCoreGTestOptions(option_parser) | 133 AddCoreGTestOptions(option_parser) |
158 AddCommonOptions(option_parser) | 134 AddCommonOptions(option_parser) |
159 | 135 |
160 | 136 |
161 def AddGTestOptions(option_parser): | 137 def AddGTestOptions(option_parser): |
162 """Adds gtest options to |option_parser|.""" | 138 """Adds gtest options to |option_parser|.""" |
163 | 139 |
164 option_parser.usage = '%prog gtest [options]' | 140 option_parser.usage = '%prog gtest [options]' |
165 option_parser.command_list = [] | 141 option_parser.command_list = [] |
166 option_parser.example = '%prog gtest -s base_unittests' | 142 option_parser.example = '%prog gtest -s base_unittests' |
167 | 143 |
168 option_parser.add_option('-s', '--suite', dest='test_suite', | 144 # TODO(gkanwar): Make this option required |
145 option_parser.add_option('-s', '--suite', dest='suite_name', | |
169 help=('Executable name of the test suite to run ' | 146 help=('Executable name of the test suite to run ' |
170 '(use -s help to list them).')) | 147 '(use -s help to list them).')) |
171 AddCoreGTestOptions(option_parser) | 148 AddCoreGTestOptions(option_parser) |
172 # TODO(gkanwar): Move these to Common Options once we have the plumbing | 149 # TODO(gkanwar): Move these to Common Options once we have the plumbing |
173 # in our other test types to handle these commands | 150 # in our other test types to handle these commands |
174 AddEmulatorOptions(option_parser) | |
175 AddCommonOptions(option_parser) | 151 AddCommonOptions(option_parser) |
176 | 152 |
177 | 153 |
154 def ProcessGTestOptions(options): | |
155 """Intercept test suite help to list test suites. | |
156 | |
157 Args: | |
158 options: Command line options. | |
159 | |
160 Returns: | |
161 True if the command should continue. | |
162 """ | |
163 if options.suite_name == 'help': | |
164 print 'Available test suites are:' | |
165 for test_suite in gtest_config.STABLE_TEST_SUITES: | |
166 print test_suite.name | |
167 return False | |
168 | |
169 # Convert to a list, assuming all test suites if nothing was specified. | |
170 # TODO(gkanwar): Require having a test suite | |
171 if options.suite_name: | |
172 options.suite_name = [options.suite_name] | |
173 else: | |
174 options.suite_name = [suite.name | |
175 for suite in gtest_config.STABLE_TEST_SUITES] | |
176 return True | |
177 | |
178 | |
178 def AddJavaTestOptions(option_parser): | 179 def AddJavaTestOptions(option_parser): |
179 """Adds the Java test options to |option_parser|.""" | 180 """Adds the Java test options to |option_parser|.""" |
180 | 181 |
181 option_parser.add_option('-f', '--test_filter', dest='test_filter', | 182 option_parser.add_option('-f', '--test_filter', dest='test_filter', |
182 help=('Test filter (if not fully qualified, ' | 183 help=('Test filter (if not fully qualified, ' |
183 'will run all matches).')) | 184 'will run all matches).')) |
184 option_parser.add_option( | 185 option_parser.add_option( |
185 '-A', '--annotation', dest='annotation_str', | 186 '-A', '--annotation', dest='annotation_str', |
186 help=('Comma-separated list of annotations. Run only tests with any of ' | 187 help=('Comma-separated list of annotations. Run only tests with any of ' |
187 'the given annotations. An annotation can be either a key or a ' | 188 'the given annotations. An annotation can be either a key or a ' |
188 'key-value pair. A test that has no annotation is considered ' | 189 'key-value pair. A test that has no annotation is considered ' |
189 '"SmallTest".')) | 190 '"SmallTest".')) |
190 option_parser.add_option( | 191 option_parser.add_option( |
191 '-E', '--exclude-annotation', dest='exclude_annotation_str', | 192 '-E', '--exclude-annotation', dest='exclude_annotation_str', |
192 help=('Comma-separated list of annotations. Exclude tests with these ' | 193 help=('Comma-separated list of annotations. Exclude tests with these ' |
193 'annotations.')) | 194 'annotations.')) |
194 option_parser.add_option('-j', '--java_only', action='store_true', | 195 option_parser.add_option('-j', '--java_only', action='store_true', |
195 default=False, help='Run only the Java tests.') | 196 default=False, help='Run only the Java tests.') |
196 option_parser.add_option('-p', '--python_only', action='store_true', | 197 option_parser.add_option('-p', '--python_only', action='store_true', |
197 default=False, | 198 default=False, |
198 help='Run only the host-driven tests.') | 199 help='Run only the host-driven tests.') |
199 option_parser.add_option('--screenshot', dest='screenshot_failures', | 200 option_parser.add_option('--screenshot', dest='screenshot_failures', |
200 action='store_true', | 201 action='store_true', |
201 help='Capture screenshots of test failures') | 202 help='Capture screenshots of test failures') |
202 option_parser.add_option('--save-perf-json', action='store_true', | 203 option_parser.add_option('--save-perf-json', action='store_true', |
203 help='Saves the JSON file for each UI Perf test.') | 204 help='Saves the JSON file for each UI Perf test.') |
204 # TODO(gkanwar): Remove this option. It is not used anywhere. | |
205 option_parser.add_option('--shard_retries', type=int, default=1, | |
206 help=('Number of times to retry each failure when ' | |
207 'sharding.')) | |
208 option_parser.add_option('--official-build', help='Run official build tests.') | 205 option_parser.add_option('--official-build', help='Run official build tests.') |
209 option_parser.add_option('--python_test_root', | 206 option_parser.add_option('--python_test_root', |
210 help='Root of the host-driven tests.') | 207 help='Root of the host-driven tests.') |
211 option_parser.add_option('--keep_test_server_ports', | 208 option_parser.add_option('--keep_test_server_ports', |
212 action='store_true', | 209 action='store_true', |
213 help=('Indicates the test server ports must be ' | 210 help=('Indicates the test server ports must be ' |
214 'kept. When this is run via a sharder ' | 211 'kept. When this is run via a sharder ' |
215 'the test server ports should be kept and ' | 212 'the test server ports should be kept and ' |
216 'should not be reset.')) | 213 'should not be reset.')) |
217 # TODO(gkanwar): This option is deprecated. Remove it in the future. | 214 # TODO(gkanwar): This option is deprecated. Remove it in the future. |
(...skipping 24 matching lines...) |
242 options.run_java_tests = False | 239 options.run_java_tests = False |
243 | 240 |
244 if not options.python_test_root: | 241 if not options.python_test_root: |
245 options.run_python_tests = False | 242 options.run_python_tests = False |
246 | 243 |
247 if options.annotation_str: | 244 if options.annotation_str: |
248 options.annotations = options.annotation_str.split(',') | 245 options.annotations = options.annotation_str.split(',') |
249 elif options.test_filter: | 246 elif options.test_filter: |
250 options.annotations = [] | 247 options.annotations = [] |
251 else: | 248 else: |
252 options.annotations = ['Smoke', 'SmallTest', 'MediumTest', 'LargeTest'] | 249 options.annotations = ['Smoke', 'SmallTest', 'MediumTest', 'LargeTest', |
250 'EnormousTest'] | |
253 | 251 |
254 if options.exclude_annotation_str: | 252 if options.exclude_annotation_str: |
255 options.exclude_annotations = options.exclude_annotation_str.split(',') | 253 options.exclude_annotations = options.exclude_annotation_str.split(',') |
256 else: | 254 else: |
257 options.exclude_annotations = [] | 255 options.exclude_annotations = [] |
258 | 256 |
259 if not options.keep_test_server_ports: | 257 if not options.keep_test_server_ports: |
260 if not ports.ResetTestServerPortAllocation(): | 258 if not ports.ResetTestServerPortAllocation(): |
261 raise Exception('Failed to reset test server port.') | 259 raise Exception('Failed to reset test server port.') |
262 | 260 |
(...skipping 81 matching lines...) |
344 options.uiautomator_jar = options.test_jar | 342 options.uiautomator_jar = options.test_jar |
345 else: | 343 else: |
346 options.uiautomator_jar = os.path.join( | 344 options.uiautomator_jar = os.path.join( |
347 _SDK_OUT_DIR, options.build_type, constants.SDK_BUILD_JAVALIB_DIR, | 345 _SDK_OUT_DIR, options.build_type, constants.SDK_BUILD_JAVALIB_DIR, |
348 '%s.dex.jar' % options.test_jar) | 346 '%s.dex.jar' % options.test_jar) |
349 options.uiautomator_info_jar = ( | 347 options.uiautomator_info_jar = ( |
350 options.uiautomator_jar[:options.uiautomator_jar.find('.dex.jar')] + | 348 options.uiautomator_jar[:options.uiautomator_jar.find('.dex.jar')] + |
351 '_java.jar') | 349 '_java.jar') |
352 | 350 |
353 | 351 |
352 def _RunGTests(options, error_func): | |
353 """Subcommand of RunTestsCommands which runs gtests.""" | |
354 if not ProcessGTestOptions(options): | |
355 return 0 | |
356 | |
357 exit_code = 0 | |
358 for suite_name in options.suite_name: | |
359 runner_factory, tests = gtest_setup.Setup( | |
360 options.exe, suite_name, options.test_arguments, | |
361 options.timeout, options.cleanup_test_files, options.tool, | |
362 options.build_type, options.webkit, options.push_deps, | |
363 options.gtest_filter) | |
364 | |
365 results, test_exit_code = test_dispatcher.RunTests( | |
366 tests, runner_factory, False, options.test_device, True, | |
367 build_type=options.build_type, | |
368 test_timeout=None, | |
369 num_retries=options.num_retries) | |
370 | |
371 if test_exit_code and exit_code != constants.ERROR_EXIT_CODE: | |
372 exit_code = test_exit_code | |
373 | |
374 report_results.LogFull( | |
375 results=results, | |
376 test_type='Unit test', | |
377 test_package=suite_name, | |
378 build_type=options.build_type, | |
379 flakiness_server=options.flakiness_dashboard_server) | |
380 | |
381 return exit_code | |
382 | |
383 | |
384 def _RunContentBrowserTests(options, error_func): | |
385 """Subcommand of RunTestsCommands which runs content_browsertests.""" | |
386 runner_factory, tests = browsertests_setup.Setup( | |
387 options.test_arguments, options.timeout, options.cleanup_test_files, | |
388 options.tool, options.build_type, options.webkit, options.push_deps, | |
389 options.gtest_filter) | |
390 | |
391 # TODO(nileshagrawal): remove this abnormally long setup timeout once fewer | |
392 # files are pushed to the devices for content_browsertests: crbug.com/138275 | |
393 setup_timeout = 20 * 60 # 20 minutes | |
394 results, exit_code = test_dispatcher.RunTests( | |
395 tests, runner_factory, False, options.test_device, True, | |
396 build_type=options.build_type, | |
397 test_timeout=None, | |
398 setup_timeout=setup_timeout, | |
399 num_retries=options.num_retries) | |
400 | |
401 report_results.LogFull( | |
402 results=results, | |
403 test_type='Unit test', | |
404 test_package=constants.BROWSERTEST_SUITE_NAME, | |
405 build_type=options.build_type, | |
406 flakiness_server=options.flakiness_dashboard_server) | |
407 | |
408 return exit_code | |
409 | |
410 | |
411 def _RunInstrumentationTests(options, error_func): | |
412 """Subcommand of RunTestsCommands which runs instrumentation tests.""" | |
413 ProcessInstrumentationOptions(options, error_func) | |
414 | |
415 results = base_test_result.TestRunResults() | |
416 exit_code = 0 | |
417 | |
418 if options.run_java_tests: | |
419 runner_factory, tests = instrumentation_setup.Setup( | |
420 options.test_apk_path, options.test_apk_jar_path, options.annotations, | |
421 options.exclude_annotations, options.test_filter, options.build_type, | |
422 options.test_data, options.install_apk, options.save_perf_json, | |
423 options.screenshot_failures, options.tool, options.wait_for_debugger, | |
424 options.disable_assertions, options.push_deps, | |
425 options.cleanup_test_files) | |
426 | |
427 test_results, exit_code = test_dispatcher.RunTests( | |
428 tests, runner_factory, options.wait_for_debugger, | |
429 options.test_device, True, | |
430 build_type=options.build_type, | |
431 test_timeout=None, | |
432 num_retries=options.num_retries) | |
433 | |
434 results.AddTestRunResults(test_results) | |
435 | |
436 if options.run_python_tests: | |
437 test_results, test_exit_code = ( | |
438 python_dispatch.DispatchPythonTests(options)) | |
439 | |
440 results.AddTestRunResults(test_results) | |
441 | |
442 # Only allow exit code escalation | |
443 if test_exit_code and exit_code != constants.ERROR_EXIT_CODE: | |
444 exit_code = test_exit_code | |
445 | |
446 report_results.LogFull( | |
447 results=results, | |
448 test_type='Instrumentation', | |
449 test_package=os.path.basename(options.test_apk), | |
450 annotation=options.annotations, | |
451 build_type=options.build_type, | |
452 flakiness_server=options.flakiness_dashboard_server) | |
453 | |
454 return exit_code | |
455 | |
456 | |
457 def _RunUIAutomatorTests(options, error_func): | |
458 """Subcommand of RunTestsCommands which runs uiautomator tests.""" | |
459 ProcessUIAutomatorOptions(options, error_func) | |
460 | |
461 results = base_test_result.TestRunResults() | |
462 exit_code = 0 | |
463 | |
464 if options.run_java_tests: | |
465 runner_factory, tests = uiautomator_setup.Setup( | |
466 options.uiautomator_jar, options.uiautomator_info_jar, | |
467 options.annotations, options.exclude_annotations, options.test_filter, | |
468 options.package_name, options.build_type, options.test_data, | |
469 options.save_perf_json, options.screenshot_failures, options.tool, | |
470 options.disable_assertions, options.push_deps, | |
471 options.cleanup_test_files) | |
472 | |
473 test_results, exit_code = test_dispatcher.RunTests( | |
474 tests, runner_factory, False, options.test_device, True, | |
475 build_type=options.build_type, | |
476 test_timeout=None, | |
477 num_retries=options.num_retries) | |
478 | |
479 results.AddTestRunResults(test_results) | |
480 | |
481 if options.run_python_tests: | |
482 test_results, test_exit_code = ( | |
483 python_dispatch.DispatchPythonTests(options)) | |
484 | |
485 results.AddTestRunResults(test_results) | |
486 | |
487 # Only allow exit code escalation | |
488 if test_exit_code and exit_code != constants.ERROR_EXIT_CODE: | |
489 exit_code = test_exit_code | |
490 | |
491 report_results.LogFull( | |
492 results=results, | |
493 test_type='UIAutomator', | |
494 test_package=os.path.basename(options.test_jar), | |
495 annotation=options.annotations, | |
496 build_type=options.build_type, | |
497 flakiness_server=options.flakiness_dashboard_server) | |
498 | |
499 return exit_code | |
500 | |
501 | |
354 def RunTestsCommand(command, options, args, option_parser): | 502 def RunTestsCommand(command, options, args, option_parser): |
355 """Checks test type and dispatches to the appropriate function. | 503 """Checks test type and dispatches to the appropriate function. |
356 | 504 |
357 Args: | 505 Args: |
358 command: String indicating the command that was received to trigger | 506 command: String indicating the command that was received to trigger |
359 this function. | 507 this function. |
360 options: optparse options dictionary. | 508 options: optparse options dictionary. |
361 args: List of extra args from optparse. | 509 args: List of extra args from optparse. |
362 option_parser: optparse.OptionParser object. | 510 option_parser: optparse.OptionParser object. |
363 | 511 |
364 Returns: | 512 Returns: |
365 Integer indicated exit code. | 513 Integer indicated exit code. |
366 | 514 |
367 Raises: | 515 Raises: |
368 Exception: Unknown command name passed in, or an exception from an | 516 Exception: Unknown command name passed in, or an exception from an |
369 individual test runner. | 517 individual test runner. |
370 """ | 518 """ |
371 | 519 |
372 # Check for extra arguments | 520 # Check for extra arguments |
373 if len(args) > 2: | 521 if len(args) > 2: |
374 option_parser.error('Unrecognized arguments: %s' % (' '.join(args[2:]))) | 522 option_parser.error('Unrecognized arguments: %s' % (' '.join(args[2:]))) |
375 return constants.ERROR_EXIT_CODE | 523 return constants.ERROR_EXIT_CODE |
376 | 524 |
377 ProcessCommonOptions(options) | 525 ProcessCommonOptions(options) |
378 | 526 |
379 if command == 'gtest': | 527 if command == 'gtest': |
380 # TODO(gkanwar): See the emulator TODO above -- this call should either go | 528 return _RunGTests(options, option_parser.error) |
381 # away or become generalized. | |
382 ProcessEmulatorOptions(options) | |
383 results, exit_code = gtest_dispatch.Dispatch(options) | |
384 elif command == 'content_browsertests': | 529 elif command == 'content_browsertests': |
385 results, exit_code = browsertests_dispatch.Dispatch(options) | 530 return _RunContentBrowserTests(options, option_parser.error) |
386 elif command == 'instrumentation': | 531 elif command == 'instrumentation': |
387 ProcessInstrumentationOptions(options, option_parser.error) | 532 return _RunInstrumentationTests(options, option_parser.error) |
388 results = base_test_result.TestRunResults() | |
389 exit_code = 0 | |
390 if options.run_java_tests: | |
391 test_results, exit_code = instrumentation_dispatch.Dispatch(options) | |
392 results.AddTestRunResults(test_results) | |
393 if options.run_python_tests: | |
394 test_results, test_exit_code = (python_dispatch. | |
395 DispatchPythonTests(options)) | |
396 results.AddTestRunResults(test_results) | |
397 # Only allow exit code escalation | |
398 if test_exit_code and exit_code != constants.ERROR_EXIT_CODE: | |
399 exit_code = test_exit_code | |
400 report_results.LogFull( | |
401 results=results, | |
402 test_type='Instrumentation', | |
403 test_package=os.path.basename(options.test_apk), | |
404 annotation=options.annotations, | |
405 build_type=options.build_type, | |
406 flakiness_server=options.flakiness_dashboard_server) | |
407 elif command == 'uiautomator': | 533 elif command == 'uiautomator': |
408 ProcessUIAutomatorOptions(options, option_parser.error) | 534 return _RunUIAutomatorTests(options, option_parser.error) |
409 results = base_test_result.TestRunResults() | |
410 exit_code = 0 | |
411 if options.run_java_tests: | |
412 test_results, exit_code = uiautomator_dispatch.Dispatch(options) | |
413 results.AddTestRunResults(test_results) | |
414 if options.run_python_tests: | |
415 test_results, test_exit_code = (python_dispatch. | |
416 DispatchPythonTests(options)) | |
417 results.AddTestRunResults(test_results) | |
418 # Only allow exit code escalation | |
419 if test_exit_code and exit_code != constants.ERROR_EXIT_CODE: | |
420 exit_code = test_exit_code | |
421 report_results.LogFull( | |
422 results=results, | |
423 test_type='UIAutomator', | |
424 test_package=os.path.basename(options.test_jar), | |
425 annotation=options.annotations, | |
426 build_type=options.build_type, | |
427 flakiness_server=options.flakiness_dashboard_server) | |
428 else: | 535 else: |
429 raise Exception('Unknown test type state') | 536 raise Exception('Unknown test type state') |
craigdh 2013/07/17 23:10:25: why the word "state"?
gkanwar 2013/07/17 23:24:20: That's a good question... Fixed.
430 | 537 |
431 return exit_code | 538 return exit_code |
432 | 539 |
433 | 540 |
434 def HelpCommand(command, options, args, option_parser): | 541 def HelpCommand(command, options, args, option_parser): |
435 """Display help for a certain command, or overall help. | 542 """Display help for a certain command, or overall help. |
436 | 543 |
437 Args: | 544 Args: |
438 command: String indicating the command that was received to trigger | 545 command: String indicating the command that was received to trigger |
439 this function. | 546 this function. |
(...skipping 86 matching lines...) |
526 return 0 | 633 return 0 |
527 command = argv[1] | 634 command = argv[1] |
528 VALID_COMMANDS[command].add_options_func(option_parser) | 635 VALID_COMMANDS[command].add_options_func(option_parser) |
529 options, args = option_parser.parse_args(argv) | 636 options, args = option_parser.parse_args(argv) |
530 return VALID_COMMANDS[command].run_command_func( | 637 return VALID_COMMANDS[command].run_command_func( |
531 command, options, args, option_parser) | 638 command, options, args, option_parser) |
532 | 639 |
533 | 640 |
534 if __name__ == '__main__': | 641 if __name__ == '__main__': |
535 sys.exit(main(sys.argv)) | 642 sys.exit(main(sys.argv)) |
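
As a reading aid for this refactor: the CL replaces the per-type Dispatch() modules with per-type Setup() functions that each return a (runner_factory, tests) pair, which the shared pylib.base.test_dispatcher.RunTests() then executes and report_results.LogFull() reports. The sketch below illustrates that split in a minimal, self-contained form; ToyRunner, toy_setup, and run_tests are invented stand-in names for illustration and are not pylib APIs.

```python
# Minimal sketch of the Setup()/RunTests() split this CL moves toward.
# ToyRunner, toy_setup, and run_tests are hypothetical stand-ins, not pylib code.

class ToyRunner(object):
  """Runs a single test on one device and reports pass/fail."""
  def __init__(self, device):
    self.device = device

  def RunTest(self, test):
    # A real runner would execute the test binary/APK on self.device.
    return (test, True)


def toy_setup(suite_name, test_filter=None):
  """Analogous to a Setup() function: returns (runner_factory, tests)."""
  tests = ['%s.TestA' % suite_name, '%s.TestB' % suite_name]
  if test_filter:
    tests = [t for t in tests if test_filter in t]
  runner_factory = lambda device: ToyRunner(device)
  return runner_factory, tests


def run_tests(tests, runner_factory, devices):
  """Analogous to test_dispatcher.RunTests(): shared across test types."""
  results = []
  exit_code = 0
  for i, test in enumerate(tests):
    runner = runner_factory(devices[i % len(devices)])
    name, passed = runner.RunTest(test)
    results.append((name, passed))
    if not passed:
      exit_code = 1
  return results, exit_code


if __name__ == '__main__':
  factory, tests = toy_setup('base_unittests')
  results, exit_code = run_tests(tests, factory, devices=['emulator-5554'])
  print results, exit_code
```

Centralizing execution this way keeps device assignment, retries, and result reporting in one dispatcher instead of being re-implemented separately for gtests, content_browsertests, instrumentation, and uiautomator tests.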