Chromium Code Reviews

| OLD | NEW |
|---|---|
| 1 #!/usr/bin/env python | 1 #!/usr/bin/env python |
| 2 # | 2 # |
| 3 # Copyright 2013 The Chromium Authors. All rights reserved. | 3 # Copyright 2013 The Chromium Authors. All rights reserved. |
| 4 # Use of this source code is governed by a BSD-style license that can be | 4 # Use of this source code is governed by a BSD-style license that can be |
| 5 # found in the LICENSE file. | 5 # found in the LICENSE file. |
| 6 | 6 |
| 7 """Runs all types of tests from one unified interface. | 7 """Runs all types of tests from one unified interface. |
| 8 | 8 |
| 9 TODO(gkanwar): | 9 TODO(gkanwar): |
| 10 * Add options to run Monkey tests. | 10 * Add options to run Monkey tests. |
| (...skipping 90 matching lines...) | |
| 101 '(use --tool help to list them)')) | 101 '(use --tool help to list them)')) |
| 102 option_parser.add_option('--flakiness-dashboard-server', | 102 option_parser.add_option('--flakiness-dashboard-server', |
| 103 dest='flakiness_dashboard_server', | 103 dest='flakiness_dashboard_server', |
| 104 help=('Address of the server that is hosting the ' | 104 help=('Address of the server that is hosting the ' |
| 105 'Chrome for Android flakiness dashboard.')) | 105 'Chrome for Android flakiness dashboard.')) |
| 106 option_parser.add_option('--skip-deps-push', dest='push_deps', | 106 option_parser.add_option('--skip-deps-push', dest='push_deps', |
| 107 action='store_false', default=True, | 107 action='store_false', default=True, |
| 108 help=('Do not push dependencies to the device. ' | 108 help=('Do not push dependencies to the device. ' |
| 109 'Use this at own risk for speeding up test ' | 109 'Use this at own risk for speeding up test ' |
| 110 'execution on local machine.')) | 110 'execution on local machine.')) |
| 111 # TODO(gkanwar): This option is deprecated. Remove it in the future. | |
| 112 option_parser.add_option('--exit-code', action='store_true', | |
| 113 help=('(DEPRECATED) If set, the exit code will be ' | |
| 114 'total number of failures.')) | |
| 115 # TODO(gkanwar): This option is deprecated. It is currently used to run tests | 111 # TODO(gkanwar): This option is deprecated. It is currently used to run tests |
| 116 # with the FlakyTest annotation to prevent the bots going red downstream. We | 112 # with the FlakyTest annotation to prevent the bots going red downstream. We |
| 117 # should instead use exit codes and let the Buildbot scripts deal with test | 113 # should instead use exit codes and let the Buildbot scripts deal with test |
| 118 # failures appropriately. See crbug.com/170477. | 114 # failures appropriately. See crbug.com/170477. |
| 119 option_parser.add_option('--buildbot-step-failure', | 115 option_parser.add_option('--buildbot-step-failure', |
frankf 2013/07/03 21:26:37
I think this is related to this CL, should we addr…

gkanwar 2013/07/03 23:15:18
I wasn't sure if we wanted to leave more time befo…
| 120 action='store_true', | 116 action='store_true', |
| 121 help=('(DEPRECATED) If present, will set the ' | 117 help=('(DEPRECATED) If present, will set the ' |
| 122 'buildbot status as STEP_FAILURE, otherwise ' | 118 'buildbot status as STEP_FAILURE, otherwise ' |
| 123 'as STEP_WARNINGS when test(s) fail.')) | 119 'as STEP_WARNINGS when test(s) fail.')) |
| 124 option_parser.add_option('-d', '--device', dest='test_device', | 120 option_parser.add_option('-d', '--device', dest='test_device', |
| 125 help=('Target device for the test suite ' | 121 help=('Target device for the test suite ' |
| 126 'to run on.')) | 122 'to run on.')) |
| 127 | 123 |
| 128 | 124 |
| 129 def ProcessCommonOptions(options): | 125 def ProcessCommonOptions(options): |
| (...skipping 238 matching lines...) | |
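The flags above follow the usual optparse pattern: `dest` names the attribute on the parsed options object, and `action='store_false'` with `default=True` turns `--skip-deps-push` into `options.push_deps == False`. A minimal self-contained sketch of that pattern (a standalone example, not part of the file under review):

```python
import optparse

parser = optparse.OptionParser()
# Mirrors --skip-deps-push above: passing the flag flips push_deps to False.
parser.add_option('--skip-deps-push', dest='push_deps',
                  action='store_false', default=True,
                  help='Do not push dependencies to the device.')
parser.add_option('--flakiness-dashboard-server',
                  dest='flakiness_dashboard_server',
                  help='Address of the flakiness dashboard server.')

options, args = parser.parse_args(['--skip-deps-push'])
print(options.push_deps)                   # False
print(options.flakiness_dashboard_server)  # None (flag not given)
```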
| 368 | 364 |
| 369 Args: | 365 Args: |
| 370 command: String indicating the command that was received to trigger | 366 command: String indicating the command that was received to trigger |
| 371 this function. | 367 this function. |
| 372 options: optparse options dictionary. | 368 options: optparse options dictionary. |
| 373 args: List of extra args from optparse. | 369 args: List of extra args from optparse. |
| 374 option_parser: optparse.OptionParser object. | 370 option_parser: optparse.OptionParser object. |
| 375 | 371 |
| 376 Returns: | 372 Returns: |
| 377 Integer indicated exit code. | 373 Integer indicated exit code. |
| 374 | |
| 375 Raises: | |
| 376 Exception: Unknown command name passed in. | |
| 378 """ | 377 """ |
| 379 | 378 |
| 380 ProcessCommonOptions(options) | 379 ProcessCommonOptions(options) |
| 381 | 380 |
| 382 total_failed = 0 | |
| 383 if command == 'gtest': | 381 if command == 'gtest': |
| 384 # TODO(gkanwar): See the emulator TODO above -- this call should either go | 382 # TODO(gkanwar): See the emulator TODO above -- this call should either go |
| 385 # away or become generalized. | 383 # away or become generalized. |
| 386 ProcessEmulatorOptions(options) | 384 ProcessEmulatorOptions(options) |
| 387 total_failed = gtest_dispatch.Dispatch(options) | 385 results = gtest_dispatch.Dispatch(options) |
| 386 exit_code = results.exit_code | |
| 388 elif command == 'content_browsertests': | 387 elif command == 'content_browsertests': |
| 389 total_failed = browsertests_dispatch.Dispatch(options) | 388 results = browsertests_dispatch.Dispatch(options) |
| 389 exit_code = results.exit_code | |
| 390 elif command == 'instrumentation': | 390 elif command == 'instrumentation': |
| 391 ProcessInstrumentationOptions(options, option_parser.error) | 391 ProcessInstrumentationOptions(options, option_parser.error) |
| 392 results = base_test_result.TestRunResults() | 392 results = base_test_result.TestRunResults() |
| 393 if options.run_java_tests: | 393 if options.run_java_tests: |
| 394 results.AddTestRunResults(instrumentation_dispatch.Dispatch(options)) | 394 results.AddTestRunResults(instrumentation_dispatch.Dispatch(options)) |
| 395 if options.run_python_tests: | 395 if options.run_python_tests: |
| 396 results.AddTestRunResults(python_dispatch.DispatchPythonTests(options)) | 396 results.AddTestRunResults(python_dispatch.DispatchPythonTests(options)) |
| 397 report_results.LogFull( | 397 report_results.LogFull( |
| 398 results=results, | 398 results=results, |
| 399 test_type='Instrumentation', | 399 test_type='Instrumentation', |
| 400 test_package=os.path.basename(options.test_apk), | 400 test_package=os.path.basename(options.test_apk), |
| 401 annotation=options.annotations, | 401 annotation=options.annotations, |
| 402 build_type=options.build_type, | 402 build_type=options.build_type, |
| 403 flakiness_server=options.flakiness_dashboard_server) | 403 flakiness_server=options.flakiness_dashboard_server) |
| 404 total_failed += len(results.GetNotPass()) | 404 exit_code = results.exit_code |
| 405 elif command == 'uiautomator': | 405 elif command == 'uiautomator': |
| 406 ProcessUIAutomatorOptions(options, option_parser.error) | 406 ProcessUIAutomatorOptions(options, option_parser.error) |
| 407 results = base_test_result.TestRunResults() | 407 results = base_test_result.TestRunResults() |
| 408 if options.run_java_tests: | 408 if options.run_java_tests: |
| 409 results.AddTestRunResults(uiautomator_dispatch.Dispatch(options)) | 409 results.AddTestRunResults(uiautomator_dispatch.Dispatch(options)) |
| 410 if options.run_python_tests: | 410 if options.run_python_tests: |
| 411 results.AddTestRunResults(python_dispatch.Dispatch(options)) | 411 results.AddTestRunResults(python_dispatch.Dispatch(options)) |
| 412 report_results.LogFull( | 412 report_results.LogFull( |
| 413 results=results, | 413 results=results, |
| 414 test_type='UIAutomator', | 414 test_type='UIAutomator', |
| 415 test_package=os.path.basename(options.test_jar), | 415 test_package=os.path.basename(options.test_jar), |
| 416 annotation=options.annotations, | 416 annotation=options.annotations, |
| 417 build_type=options.build_type, | 417 build_type=options.build_type, |
| 418 flakiness_server=options.flakiness_dashboard_server) | 418 flakiness_server=options.flakiness_dashboard_server) |
| 419 total_failed += len(results.GetNotPass()) | 419 exit_code = results.exit_code |
| 420 else: | 420 else: |
| 421 raise Exception('Unknown test type state') | 421 raise Exception('Unknown test type state') |
| 422 | 422 |
| 423 return total_failed | 423 return exit_code |
| 424 | 424 |
| 425 | 425 |
| 426 def HelpCommand(command, options, args, option_parser): | 426 def HelpCommand(command, options, args, option_parser): |
| 427 """Display help for a certain command, or overall help. | 427 """Display help for a certain command, or overall help. |
| 428 | 428 |
| 429 Args: | 429 Args: |
| 430 command: String indicating the command that was received to trigger | 430 command: String indicating the command that was received to trigger |
| 431 this function. | 431 this function. |
| 432 options: optparse options dictionary. | 432 options: optparse options dictionary. |
| 433 args: List of extra args from optparse. | 433 args: List of extra args from optparse. |
| (...skipping 62 matching lines...) | |
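In the RunTestsCommand hunk above, the NEW side stops summing per-suite failure counts into `total_failed` and instead returns `results.exit_code` from each dispatcher, in line with the TODO about letting Buildbot scripts interpret exit codes (crbug.com/170477). `base_test_result.TestRunResults` is not shown in this excerpt, so the following is only a rough sketch of the idea under the assumption that `exit_code` is zero exactly when every test passed; the class and method names here are hypothetical, not the module's real API.

```python
# Hypothetical sketch, not the real base_test_result.TestRunResults: a results
# object whose exit_code collapses any failure into a single non-zero value,
# which is what RunTestsCommand now returns instead of a failure count.
class SketchTestRunResults(object):
  def __init__(self):
    self._not_passed = []

  def AddFailure(self, test_name):
    self._not_passed.append(test_name)

  def GetNotPass(self):
    return list(self._not_passed)

  @property
  def exit_code(self):
    # Assumed convention: 0 on success, 1 on any failure.
    return 1 if self._not_passed else 0


results = SketchTestRunResults()
results.AddFailure('ExampleTest.testFails')
assert results.exit_code == 1
```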
| 496 def get_command_list(self): | 496 def get_command_list(self): |
| 497 if self.command_list: | 497 if self.command_list: |
| 498 return '\nCommands:\n %s\n' % '\n '.join(sorted(self.command_list)) | 498 return '\nCommands:\n %s\n' % '\n '.join(sorted(self.command_list)) |
| 499 return '' | 499 return '' |
| 500 | 500 |
| 501 def get_example(self): | 501 def get_example(self): |
| 502 if self.example: | 502 if self.example: |
| 503 return '\nExample:\n %s\n' % self.example | 503 return '\nExample:\n %s\n' % self.example |
| 504 return '' | 504 return '' |
| 505 | 505 |
| 506 | |
| 506 def main(argv): | 507 def main(argv): |
| 507 option_parser = CommandOptionParser( | 508 option_parser = CommandOptionParser( |
| 508 usage='Usage: %prog <command> [options]', | 509 usage='Usage: %prog <command> [options]', |
| 509 command_list=VALID_COMMANDS.keys()) | 510 command_list=VALID_COMMANDS.keys()) |
| 510 | 511 |
| 511 if len(argv) < 2 or argv[1] not in VALID_COMMANDS: | 512 if len(argv) < 2 or argv[1] not in VALID_COMMANDS: |
| 512 option_parser.print_help() | 513 option_parser.print_help() |
| 513 return 0 | 514 return 0 |
| 514 command = argv[1] | 515 command = argv[1] |
| 515 VALID_COMMANDS[command].add_options_func(option_parser) | 516 VALID_COMMANDS[command].add_options_func(option_parser) |
| 516 options, args = option_parser.parse_args(argv) | 517 options, args = option_parser.parse_args(argv) |
| 517 exit_code = VALID_COMMANDS[command].run_command_func( | 518 exit_code = VALID_COMMANDS[command].run_command_func( |
| 518 command, options, args, option_parser) | 519 command, options, args, option_parser) |
| 519 | 520 |
| 520 # Failures of individual test suites are communicated by printing a | |
| 521 # STEP_FAILURE message. | |
| 522 # Returning a success exit status also prevents the buildbot from incorrectly | |
| 523 # marking the last suite as failed if there were failures in other suites in | |
| 524 # the batch (this happens because the exit status is a sum of all failures | |
| 525 # from all suites, but the buildbot associates the exit status only with the | |
| 526 # most recent step). | |
| 527 return exit_code | 521 return exit_code |
| 528 | 522 |
| 529 | 523 |
| 530 if __name__ == '__main__': | 524 if __name__ == '__main__': |
| 531 sys.exit(main(sys.argv)) | 525 sys.exit(main(sys.argv)) |
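main() dispatches through VALID_COMMANDS, whose definition sits in the skipped portion of the file; judging only from the attribute accesses visible here, each entry exposes `add_options_func` and `run_command_func` callables. A hypothetical sketch of that dispatch shape follows (the `CommandConfig` name and the `echo` command are invented for illustration, not taken from the file):

```python
import collections
import optparse

# Hypothetical stand-in for the elided VALID_COMMANDS table: each entry pairs
# the two callables that main() above invokes, add_options_func and
# run_command_func. The 'echo' command is invented for illustration.
CommandConfig = collections.namedtuple(
    'CommandConfig', ['add_options_func', 'run_command_func'])


def AddEchoOptions(option_parser):
  option_parser.add_option('--message', default='hello')


def RunEchoCommand(command, options, args, option_parser):
  print('%s: %s' % (command, options.message))
  return 0  # becomes the process exit code via sys.exit(main(...))


VALID_COMMANDS = {'echo': CommandConfig(AddEchoOptions, RunEchoCommand)}

# Same dispatch shape as main() above:
parser = optparse.OptionParser(usage='Usage: %prog <command> [options]')
VALID_COMMANDS['echo'].add_options_func(parser)
options, args = parser.parse_args(['prog', 'echo', '--message', 'hi'])
exit_code = VALID_COMMANDS['echo'].run_command_func('echo', options, args, parser)
```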