Index: build/android/test_runner.py
diff --git a/build/android/test_runner.py b/build/android/test_runner.py
index e103db0ddd21c8db1067c259e2261535cc958d86..a11ace33c9b904e4f1b07f669448b35301541994 100755
--- a/build/android/test_runner.py
+++ b/build/android/test_runner.py
@@ -108,19 +108,6 @@ def AddCommonOptions(option_parser):
                            help=('Do not push dependencies to the device. '
                                  'Use this at own risk for speeding up test '
                                  'execution on local machine.'))
-  # TODO(gkanwar): This option is deprecated. Remove it in the future.
-  option_parser.add_option('--exit-code', action='store_true',
-                           help=('(DEPRECATED) If set, the exit code will be '
-                                 'total number of failures.'))
-  # TODO(gkanwar): This option is deprecated. It is currently used to run tests
-  # with the FlakyTest annotation to prevent the bots going red downstream. We
-  # should instead use exit codes and let the Buildbot scripts deal with test
-  # failures appropriately. See crbug.com/170477.
-  option_parser.add_option('--buildbot-step-failure',
-                           action='store_true',
-                           help=('(DEPRECATED) If present, will set the '
-                                 'buildbot status as STEP_FAILURE, otherwise '
-                                 'as STEP_WARNINGS when test(s) fail.'))
   option_parser.add_option('-d', '--device', dest='test_device',
                            help=('Target device for the test suite '
                                  'to run on.'))
@@ -375,25 +362,34 @@ def RunTestsCommand(command, options, args, option_parser):
   Returns:
     Integer indicated exit code.
+
+  Raises:
+    Exception: Unknown command name passed in.
frankf 2013/07/08 19:13:43: Same here, is this the only exception that can be raised?
gkanwar 2013/07/08 21:17:07: Updated docstring.
""" |
ProcessCommonOptions(options) |
- total_failed = 0 |
if command == 'gtest': |
# TODO(gkanwar): See the emulator TODO above -- this call should either go |
# away or become generalized. |
ProcessEmulatorOptions(options) |
- total_failed = gtest_dispatch.Dispatch(options) |
+ results, exit_code = gtest_dispatch.Dispatch(options) |
elif command == 'content_browsertests': |
- total_failed = browsertests_dispatch.Dispatch(options) |
+ results, exit_code = browsertests_dispatch.Dispatch(options) |
elif command == 'instrumentation': |
ProcessInstrumentationOptions(options, option_parser.error) |
results = base_test_result.TestRunResults() |
+ exit_code = 0 |
if options.run_java_tests: |
- results.AddTestRunResults(instrumentation_dispatch.Dispatch(options)) |
+ test_results, test_exit_code = instrumentation_dispatch.Dispatch(options) |
+ results.AddTestRunResults(test_results) |
+ exit_code = test_exit_code |
frankf 2013/07/08 19:13:43: No need for the intermediate variable.
gkanwar 2013/07/08 21:17:07: Done.
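A minimal sketch of the simplification suggested above, assuming
instrumentation_dispatch.Dispatch() returns a (TestRunResults, exit_code) pair
as this patch makes it do; illustrative only, not necessarily the exact code
that landed:

# Since `results` is still the empty TestRunResults created just above, the
# dispatched results can replace it directly, avoiding the temporary
# test_results/test_exit_code names.
if options.run_java_tests:
  results, exit_code = instrumentation_dispatch.Dispatch(options)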
     if options.run_python_tests:
-      results.AddTestRunResults(python_dispatch.DispatchPythonTests(options))
+      test_results, test_exit_code = python_dispatch.DispatchPythonTests(options)
+      results.AddTestRunResults(test_results)
+      # Only allow exit code escalation
+      if test_exit_code and exit_code != constants.ERROR_EXIT_CODE:
+        exit_code = test_exit_code
     report_results.LogFull(
         results=results,
         test_type='Instrumentation',
@@ -401,14 +397,20 @@ def RunTestsCommand(command, options, args, option_parser):
         annotation=options.annotations,
         build_type=options.build_type,
         flakiness_server=options.flakiness_dashboard_server)
-    total_failed += len(results.GetNotPass())
   elif command == 'uiautomator':
     ProcessUIAutomatorOptions(options, option_parser.error)
     results = base_test_result.TestRunResults()
+    exit_code = 0
     if options.run_java_tests:
-      results.AddTestRunResults(uiautomator_dispatch.Dispatch(options))
+      test_results, test_exit_code = uiautomator_dispatch.Dispatch(options)
+      results.AddTestRunResults(test_results)
+      exit_code = test_exit_code
frankf 2013/07/08 19:13:43: Same here.
gkanwar 2013/07/08 21:17:07: Done.
     if options.run_python_tests:
-      results.AddTestRunResults(python_dispatch.Dispatch(options))
+      test_results, test_exit_code = python_dispatch.DispatchPythonTests(options)
+      results.AddTestRunResults(test_results)
+      # Only allow exit code escalation
+      if test_exit_code and exit_code != constants.ERROR_EXIT_CODE:
+        exit_code = test_exit_code
     report_results.LogFull(
         results=results,
         test_type='UIAutomator',
@@ -416,11 +418,10 @@ def RunTestsCommand(command, options, args, option_parser):
         annotation=options.annotations,
         build_type=options.build_type,
         flakiness_server=options.flakiness_dashboard_server)
-    total_failed += len(results.GetNotPass())
   else:
     raise Exception('Unknown test type state')
-  return total_failed
+  return exit_code
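The "Only allow exit code escalation" guard used in both the instrumentation
and uiautomator branches boils down to: a non-zero code from a later test
phase replaces the current one, unless the current code is already
constants.ERROR_EXIT_CODE. A self-contained sketch of that rule (the helper
name and the stand-in ERROR_EXIT_CODE value are assumptions for illustration,
not part of the patch):

ERROR_EXIT_CODE = 1  # stand-in for constants.ERROR_EXIT_CODE


def EscalateExitCode(current, new):
  """Merge exit codes from successive test phases.

  A non-zero code from a later phase overwrites the current one, except that
  an existing ERROR_EXIT_CODE is never downgraded.
  """
  if new and current != ERROR_EXIT_CODE:
    return new
  return current


# Example: a later failure escalates a clean run, but an earlier hard error
# is preserved.
assert EscalateExitCode(0, 2) == 2
assert EscalateExitCode(ERROR_EXIT_CODE, 2) == ERROR_EXIT_CODE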
 def HelpCommand(command, options, args, option_parser):
@@ -503,6 +504,7 @@ class CommandOptionParser(optparse.OptionParser):
       return '\nExample:\n  %s\n' % self.example
     return ''
 
+
 def main(argv):
   option_parser = CommandOptionParser(
       usage='Usage: %prog <command> [options]',
@@ -517,13 +519,6 @@ def main(argv):
   exit_code = VALID_COMMANDS[command].run_command_func(
frankf 2013/07/08 19:13:43: Just return inline.
gkanwar 2013/07/08 21:17:07: Done.
       command, options, args, option_parser)
-  # Failures of individual test suites are communicated by printing a
-  # STEP_FAILURE message.
-  # Returning a success exit status also prevents the buildbot from incorrectly
-  # marking the last suite as failed if there were failures in other suites in
-  # the batch (this happens because the exit status is a sum of all failures
-  # from all suites, but the buildbot associates the exit status only with the
-  # most recent step).
   return exit_code
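On the "Just return inline" comment for main(): the patch set shown above
still stores the dispatcher's result in exit_code before returning it. A small
self-contained sketch of the inlined shape (all names below are made up for
the example; they are not Chromium's):

import sys


def _run_stub(command, args):
  # Stand-in for a command's run_command_func; pretend everything passed.
  print('ran %s with args %s' % (command, args))
  return 0


EXAMPLE_COMMANDS = {'gtest': _run_stub, 'instrumentation': _run_stub}


def main_example(argv):
  command = argv[1] if len(argv) > 1 else 'gtest'
  if command not in EXAMPLE_COMMANDS:
    return 2
  # Return the dispatcher's exit code directly instead of assigning it to a
  # temporary first, as the review suggests.
  return EXAMPLE_COMMANDS[command](command, argv[2:])


if __name__ == '__main__':
  sys.exit(main_example(sys.argv))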