OLD | NEW |
1 #!/usr/bin/env python | 1 #!/usr/bin/env python |
2 # | 2 # |
3 # Copyright 2013 The Chromium Authors. All rights reserved. | 3 # Copyright 2013 The Chromium Authors. All rights reserved. |
4 # Use of this source code is governed by a BSD-style license that can be | 4 # Use of this source code is governed by a BSD-style license that can be |
5 # found in the LICENSE file. | 5 # found in the LICENSE file. |
6 | 6 |
7 """Runs all types of tests from one unified interface. | 7 """Runs all types of tests from one unified interface. |
8 | 8 |
9 TODO(gkanwar): | 9 TODO(gkanwar): |
10 * Add options to run Monkey tests. | 10 * Add options to run Monkey tests. |
11 """ | 11 """ |
12 | 12 |
13 import collections | 13 import collections |
14 import optparse | 14 import optparse |
15 import os | 15 import os |
16 import shutil | 16 import shutil |
17 import sys | 17 import sys |
18 | 18 |
19 from pylib import cmd_helper | |
20 from pylib import constants | 19 from pylib import constants |
21 from pylib import ports | 20 from pylib import ports |
22 from pylib.base import base_test_result | 21 from pylib.base import base_test_result |
23 from pylib.base import test_dispatcher | 22 from pylib.base import test_dispatcher |
24 from pylib.browsertests import setup as browsertests_setup | 23 from pylib.browsertests import setup as browsertests_setup |
| 24 from pylib.gtest import gtest_config |
25 from pylib.gtest import setup as gtest_setup | 25 from pylib.gtest import setup as gtest_setup |
26 from pylib.gtest import gtest_config | 26 from pylib.host_driven import setup as host_driven_setup |
27 from pylib.host_driven import run_python_tests as python_dispatch | |
28 from pylib.instrumentation import setup as instrumentation_setup | 27 from pylib.instrumentation import setup as instrumentation_setup |
29 from pylib.uiautomator import setup as uiautomator_setup | 28 from pylib.uiautomator import setup as uiautomator_setup |
30 from pylib.utils import report_results | 29 from pylib.utils import report_results |
31 from pylib.utils import run_tests_helper | 30 from pylib.utils import run_tests_helper |
32 | 31 |
33 | 32 |
34 _SDK_OUT_DIR = os.path.join(constants.DIR_SOURCE_ROOT, 'out') | 33 _SDK_OUT_DIR = os.path.join(constants.DIR_SOURCE_ROOT, 'out') |
35 | 34 |
36 | 35 |
37 def AddBuildTypeOption(option_parser): | 36 def AddBuildTypeOption(option_parser): |
(...skipping 139 matching lines...)
177 option_parser.add_option( | 176 option_parser.add_option( |
178 '-A', '--annotation', dest='annotation_str', | 177 '-A', '--annotation', dest='annotation_str', |
179 help=('Comma-separated list of annotations. Run only tests with any of ' | 178 help=('Comma-separated list of annotations. Run only tests with any of ' |
180 'the given annotations. An annotation can be either a key or a ' | 179 'the given annotations. An annotation can be either a key or a ' |
181 'key-values pair. A test that has no annotation is considered ' | 180 'key-values pair. A test that has no annotation is considered ' |
182 '"SmallTest".')) | 181 '"SmallTest".')) |
183 option_parser.add_option( | 182 option_parser.add_option( |
184 '-E', '--exclude-annotation', dest='exclude_annotation_str', | 183 '-E', '--exclude-annotation', dest='exclude_annotation_str', |
185 help=('Comma-separated list of annotations. Exclude tests with these ' | 184 help=('Comma-separated list of annotations. Exclude tests with these ' |
186 'annotations.')) | 185 'annotations.')) |
187 option_parser.add_option('-j', '--java_only', action='store_true', | |
188 default=False, help='Run only the Java tests.') | |
189 option_parser.add_option('-p', '--python_only', action='store_true', | |
190 default=False, | |
191 help='Run only the host-driven tests.') | |
192 option_parser.add_option('--screenshot', dest='screenshot_failures', | 186 option_parser.add_option('--screenshot', dest='screenshot_failures', |
193 action='store_true', | 187 action='store_true', |
194 help='Capture screenshots of test failures') | 188 help='Capture screenshots of test failures') |
195 option_parser.add_option('--save-perf-json', action='store_true', | 189 option_parser.add_option('--save-perf-json', action='store_true', |
196 help='Saves the JSON file for each UI Perf test.') | 190 help='Saves the JSON file for each UI Perf test.') |
197 option_parser.add_option('--official-build', action='store_true', help='Run official build tests.') | 191 option_parser.add_option('--official-build', action='store_true', help='Run official build tests.') |
198 option_parser.add_option('--python_test_root', | |
199 help='Root of the host-driven tests.') | |
200 option_parser.add_option('--keep_test_server_ports', | 192 option_parser.add_option('--keep_test_server_ports', |
201 action='store_true', | 193 action='store_true', |
202 help=('Indicates the test server ports must be ' | 194 help=('Indicates the test server ports must be ' |
203 'kept. When this is run via a sharder, ' | 195 'kept. When this is run via a sharder, ' |
204 'the test server ports should be kept and ' | 196 'the test server ports should be kept and ' |
205 'should not be reset.')) | 197 'should not be reset.')) |
206 # TODO(gkanwar): This option is deprecated. Remove it in the future. | 198 # TODO(gkanwar): This option is deprecated. Remove it in the future. |
207 option_parser.add_option('--disable_assertions', action='store_true', | 199 option_parser.add_option('--disable_assertions', action='store_true', |
208 help=('(DEPRECATED) Run with java assertions ' | 200 help=('(DEPRECATED) Run with java assertions ' |
209 'disabled.')) | 201 'disabled.')) |
210 option_parser.add_option('--test_data', action='append', default=[], | 202 option_parser.add_option('--test_data', action='append', default=[], |
211 help=('Each instance defines a directory of test ' | 203 help=('Each instance defines a directory of test ' |
212 'data that should be copied to the target(s) ' | 204 'data that should be copied to the target(s) ' |
213 'before running the tests. The argument ' | 205 'before running the tests. The argument ' |
214 'should be of the form <target>:<source>, where ' | 206 'should be of the form <target>:<source>, where ' |
215 '<target> is relative to the device data ' | 207 '<target> is relative to the device data ' |
216 'directory, and <source> is relative to the ' | 208 'directory, and <source> is relative to the ' |
217 'chromium build directory.')) | 209 'chromium build directory.')) |
218 | 210 |
219 | 211 |
220 def ProcessJavaTestOptions(options, error_func): | 212 def ProcessJavaTestOptions(options, error_func): |
221 """Processes options/arguments and populates |options| with defaults.""" | 213 """Processes options/arguments and populates |options| with defaults.""" |
222 | 214 |
223 if options.java_only and options.python_only: | |
224 error_func('Options java_only (-j) and python_only (-p) ' | |
225 'are mutually exclusive.') | |
226 options.run_java_tests = True | |
227 options.run_python_tests = True | |
228 if options.java_only: | |
229 options.run_python_tests = False | |
230 elif options.python_only: | |
231 options.run_java_tests = False | |
232 | |
233 if not options.python_test_root: | |
234 options.run_python_tests = False | |
235 | |
236 if options.annotation_str: | 215 if options.annotation_str: |
237 options.annotations = options.annotation_str.split(',') | 216 options.annotations = options.annotation_str.split(',') |
238 elif options.test_filter: | 217 elif options.test_filter: |
239 options.annotations = [] | 218 options.annotations = [] |
240 else: | 219 else: |
241 options.annotations = ['Smoke', 'SmallTest', 'MediumTest', 'LargeTest', | 220 options.annotations = ['Smoke', 'SmallTest', 'MediumTest', 'LargeTest', |
242 'EnormousTest'] | 221 'EnormousTest'] |
243 | 222 |
244 if options.exclude_annotation_str: | 223 if options.exclude_annotation_str: |
245 options.exclude_annotations = options.exclude_annotation_str.split(',') | 224 options.exclude_annotations = options.exclude_annotation_str.split(',') |
246 else: | 225 else: |
247 options.exclude_annotations = [] | 226 options.exclude_annotations = [] |
248 | 227 |
249 if not options.keep_test_server_ports: | 228 if not options.keep_test_server_ports: |
250 if not ports.ResetTestServerPortAllocation(): | 229 if not ports.ResetTestServerPortAllocation(): |
251 raise Exception('Failed to reset test server port.') | 230 raise Exception('Failed to reset test server port.') |
252 | 231 |
253 | 232 |
254 def AddInstrumentationTestOptions(option_parser): | 233 def AddInstrumentationTestOptions(option_parser): |
255 """Adds Instrumentation test options to |option_parser|.""" | 234 """Adds Instrumentation test options to |option_parser|.""" |
256 | 235 |
257 option_parser.usage = '%prog instrumentation [options]' | 236 option_parser.usage = '%prog instrumentation [options]' |
258 option_parser.command_list = [] | 237 option_parser.command_list = [] |
259 option_parser.example = ('%prog instrumentation -I ' | 238 option_parser.example = ('%prog instrumentation -I ' |
260 '--test-apk=ChromiumTestShellTest') | 239 '--test-apk=ChromiumTestShellTest') |
261 | 240 |
262 AddJavaTestOptions(option_parser) | 241 AddJavaTestOptions(option_parser) |
263 AddCommonOptions(option_parser) | 242 AddCommonOptions(option_parser) |
264 | 243 |
| 244 option_parser.add_option('-j', '--java_only', action='store_true', |
| 245 default=False, help='Run only the Java tests.') |
| 246 option_parser.add_option('-p', '--python_only', action='store_true', |
| 247 default=False, |
| 248 help='Run only the host-driven tests.') |
| 249 option_parser.add_option('--python_test_root', |
| 250 help='Root of the host-driven tests.') |
265 option_parser.add_option('-w', '--wait_debugger', dest='wait_for_debugger', | 251 option_parser.add_option('-w', '--wait_debugger', dest='wait_for_debugger', |
266 action='store_true', | 252 action='store_true', |
267 help='Wait for debugger.') | 253 help='Wait for debugger.') |
268 option_parser.add_option('-I', dest='install_apk', action='store_true', | 254 option_parser.add_option('-I', dest='install_apk', action='store_true', |
269 help='Install test APK.') | 255 help='Install test APK.') |
270 option_parser.add_option( | 256 option_parser.add_option( |
271 '--test-apk', dest='test_apk', | 257 '--test-apk', dest='test_apk', |
272 help=('The name of the apk containing the tests ' | 258 help=('The name of the apk containing the tests ' |
273 '(without the .apk extension; e.g. "ContentShellTest"). ' | 259 '(without the .apk extension; e.g. "ContentShellTest"). ' |
274 'Alternatively, this can be a full path to the apk.')) | 260 'Alternatively, this can be a full path to the apk.')) |
275 | 261 |
276 | 262 |
277 def ProcessInstrumentationOptions(options, error_func): | 263 def ProcessInstrumentationOptions(options, error_func): |
278 """Processes options/arguments and populate |options| with defaults.""" | 264 """Processes options/arguments and populate |options| with defaults.""" |
279 | 265 |
280 ProcessJavaTestOptions(options, error_func) | 266 ProcessJavaTestOptions(options, error_func) |
281 | 267 |
| 268 if options.java_only and options.python_only: |
| 269 error_func('Options java_only (-j) and python_only (-p) ' |
| 270 'are mutually exclusive.') |
| 271 options.run_java_tests = True |
| 272 options.run_python_tests = True |
| 273 if options.java_only: |
| 274 options.run_python_tests = False |
| 275 elif options.python_only: |
| 276 options.run_java_tests = False |
| 277 |
| 278 if not options.python_test_root: |
| 279 options.run_python_tests = False |
| 280 |
282 if not options.test_apk: | 281 if not options.test_apk: |
283 error_func('--test-apk must be specified.') | 282 error_func('--test-apk must be specified.') |
284 | 283 |
285 if os.path.exists(options.test_apk): | 284 if os.path.exists(options.test_apk): |
286 # The APK is fully qualified; assume the JAR lives alongside. | 285 # The APK is fully qualified; assume the JAR lives alongside. |
287 options.test_apk_path = options.test_apk | 286 options.test_apk_path = options.test_apk |
288 options.test_apk_jar_path = (os.path.splitext(options.test_apk_path)[0] + | 287 options.test_apk_jar_path = (os.path.splitext(options.test_apk_path)[0] + |
289 '.jar') | 288 '.jar') |
290 else: | 289 else: |
291 options.test_apk_path = os.path.join(_SDK_OUT_DIR, | 290 options.test_apk_path = os.path.join(_SDK_OUT_DIR, |
(...skipping 135 matching lines...)
427 tests, runner_factory, options.wait_for_debugger, | 426 tests, runner_factory, options.wait_for_debugger, |
428 options.test_device, | 427 options.test_device, |
429 shard=True, | 428 shard=True, |
430 build_type=options.build_type, | 429 build_type=options.build_type, |
431 test_timeout=None, | 430 test_timeout=None, |
432 num_retries=options.num_retries) | 431 num_retries=options.num_retries) |
433 | 432 |
434 results.AddTestRunResults(test_results) | 433 results.AddTestRunResults(test_results) |
435 | 434 |
436 if options.run_python_tests: | 435 if options.run_python_tests: |
437 test_results, test_exit_code = ( | 436 runner_factory, tests = host_driven_setup.InstrumentationSetup( |
438 python_dispatch.DispatchPythonTests(options)) | 437 options.python_test_root, options.official_build, options.annotations, |
| 438 options.exclude_annotations, options.test_filter, options.tool, |
| 439 options.build_type, options.push_deps, options.cleanup_test_files, |
| 440 options.test_apk_path, options.test_apk_jar_path, options.test_data, |
| 441 options.install_apk, options.save_perf_json, |
| 442 options.screenshot_failures, options.wait_for_debugger, |
| 443 options.disable_assertions) |
| 444 |
| 445 test_results, test_exit_code = test_dispatcher.RunTests( |
| 446 tests, runner_factory, False, |
| 447 options.test_device, |
| 448 shard=True, |
| 449 build_type=options.build_type, |
| 450 test_timeout=None, |
| 451 num_retries=options.num_retries) |
439 | 452 |
440 results.AddTestRunResults(test_results) | 453 results.AddTestRunResults(test_results) |
441 | 454 |
442 # Only allow exit code escalation | 455 # Only allow exit code escalation |
443 if test_exit_code and exit_code != constants.ERROR_EXIT_CODE: | 456 if test_exit_code and exit_code != constants.ERROR_EXIT_CODE: |
444 exit_code = test_exit_code | 457 exit_code = test_exit_code |
445 | 458 |
446 report_results.LogFull( | 459 report_results.LogFull( |
447 results=results, | 460 results=results, |
448 test_type='Instrumentation', | 461 test_type='Instrumentation', |
449 test_package=os.path.basename(options.test_apk), | 462 test_package=os.path.basename(options.test_apk), |
450 annotation=options.annotations, | 463 annotation=options.annotations, |
451 build_type=options.build_type, | 464 build_type=options.build_type, |
452 flakiness_server=options.flakiness_dashboard_server) | 465 flakiness_server=options.flakiness_dashboard_server) |
453 | 466 |
454 return exit_code | 467 return exit_code |
455 | 468 |
456 | 469 |
457 def _RunUIAutomatorTests(options, error_func): | 470 def _RunUIAutomatorTests(options, error_func): |
458 """Subcommand of RunTestsCommands which runs uiautomator tests.""" | 471 """Subcommand of RunTestsCommands which runs uiautomator tests.""" |
459 ProcessUIAutomatorOptions(options, error_func) | 472 ProcessUIAutomatorOptions(options, error_func) |
460 | 473 |
461 results = base_test_result.TestRunResults() | 474 results = base_test_result.TestRunResults() |
462 exit_code = 0 | 475 exit_code = 0 |
463 | 476 |
464 if options.run_java_tests: | 477 runner_factory, tests = uiautomator_setup.Setup( |
465 runner_factory, tests = uiautomator_setup.Setup( | 478 options.uiautomator_jar, options.uiautomator_info_jar, |
466 options.uiautomator_jar, options.uiautomator_info_jar, | 479 options.annotations, options.exclude_annotations, options.test_filter, |
467 options.annotations, options.exclude_annotations, options.test_filter, | 480 options.package_name, options.build_type, options.test_data, |
468 options.package_name, options.build_type, options.test_data, | 481 options.save_perf_json, options.screenshot_failures, options.tool, |
469 options.save_perf_json, options.screenshot_failures, options.tool, | 482 options.disable_assertions, options.push_deps, |
470 options.disable_assertions, options.push_deps, | 483 options.cleanup_test_files) |
471 options.cleanup_test_files) | |
472 | 484 |
473 test_results, exit_code = test_dispatcher.RunTests( | 485 results, exit_code = test_dispatcher.RunTests( |
474 tests, runner_factory, False, options.test_device, | 486 tests, runner_factory, False, options.test_device, |
475 shard=True, | 487 shard=True, |
476 build_type=options.build_type, | 488 build_type=options.build_type, |
477 test_timeout=None, | 489 test_timeout=None, |
478 num_retries=options.num_retries) | 490 num_retries=options.num_retries) |
479 | |
480 results.AddTestRunResults(test_results) | |
481 | |
482 if options.run_python_tests: | |
483 test_results, test_exit_code = ( | |
484 python_dispatch.DispatchPythonTests(options)) | |
485 | |
486 results.AddTestRunResults(test_results) | |
487 | |
488 # Only allow exit code escalation | |
489 if test_exit_code and exit_code != constants.ERROR_EXIT_CODE: | |
490 exit_code = test_exit_code | |
491 | 491 |
492 report_results.LogFull( | 492 report_results.LogFull( |
493 results=results, | 493 results=results, |
494 test_type='UIAutomator', | 494 test_type='UIAutomator', |
495 test_package=os.path.basename(options.test_jar), | 495 test_package=os.path.basename(options.test_jar), |
496 annotation=options.annotations, | 496 annotation=options.annotations, |
497 build_type=options.build_type, | 497 build_type=options.build_type, |
498 flakiness_server=options.flakiness_dashboard_server) | 498 flakiness_server=options.flakiness_dashboard_server) |
499 | 499 |
500 return exit_code | 500 return exit_code |
(...skipping 28 matching lines...)
529 return _RunGTests(options, option_parser.error) | 529 return _RunGTests(options, option_parser.error) |
530 elif command == 'content_browsertests': | 530 elif command == 'content_browsertests': |
531 return _RunContentBrowserTests(options, option_parser.error) | 531 return _RunContentBrowserTests(options, option_parser.error) |
532 elif command == 'instrumentation': | 532 elif command == 'instrumentation': |
533 return _RunInstrumentationTests(options, option_parser.error) | 533 return _RunInstrumentationTests(options, option_parser.error) |
534 elif command == 'uiautomator': | 534 elif command == 'uiautomator': |
535 return _RunUIAutomatorTests(options, option_parser.error) | 535 return _RunUIAutomatorTests(options, option_parser.error) |
536 else: | 536 else: |
537 raise Exception('Unknown test type.') | 537 raise Exception('Unknown test type.') |
538 | 538 |
539 return exit_code | |
540 | |
541 | 539 |
542 def HelpCommand(command, options, args, option_parser): | 540 def HelpCommand(command, options, args, option_parser): |
543 """Display help for a certain command, or overall help. | 541 """Display help for a certain command, or overall help. |
544 | 542 |
545 Args: | 543 Args: |
546 command: String indicating the command that was received to trigger | 544 command: String indicating the command that was received to trigger |
547 this function. | 545 this function. |
548 options: optparse options dictionary. | 546 options: optparse options dictionary. |
549 args: List of extra args from optparse. | 547 args: List of extra args from optparse. |
550 option_parser: optparse.OptionParser object. | 548 option_parser: optparse.OptionParser object. |
(...skipping 83 matching lines...)
634 return 0 | 632 return 0 |
635 command = argv[1] | 633 command = argv[1] |
636 VALID_COMMANDS[command].add_options_func(option_parser) | 634 VALID_COMMANDS[command].add_options_func(option_parser) |
637 options, args = option_parser.parse_args(argv) | 635 options, args = option_parser.parse_args(argv) |
638 return VALID_COMMANDS[command].run_command_func( | 636 return VALID_COMMANDS[command].run_command_func( |
639 command, options, args, option_parser) | 637 command, options, args, option_parser) |
640 | 638 |
641 | 639 |
642 if __name__ == '__main__': | 640 if __name__ == '__main__': |
643 sys.exit(main(sys.argv)) | 641 sys.exit(main(sys.argv)) |