Index: build/android/test_runner.py
diff --git a/build/android/test_runner.py b/build/android/test_runner.py
index f50bc7cb82b131efb488617d89f70796d84c7f5c..f7f3828bb2de246ad846c60e217807f126acdff3 100755
--- a/build/android/test_runner.py
+++ b/build/android/test_runner.py
@@ -16,15 +16,14 @@ import os
 import shutil
 import sys
 
-from pylib import cmd_helper
 from pylib import constants
 from pylib import ports
 from pylib.base import base_test_result
 from pylib.base import test_dispatcher
 from pylib.browsertests import setup as browsertests_setup
-from pylib.gtest import setup as gtest_setup
 from pylib.gtest import gtest_config
-from pylib.host_driven import run_python_tests as python_dispatch
+from pylib.gtest import setup as gtest_setup
+from pylib.host_driven import setup as host_driven_setup
 from pylib.instrumentation import setup as instrumentation_setup
 from pylib.uiautomator import setup as uiautomator_setup
 from pylib.utils import report_results
@@ -184,19 +183,12 @@ def AddJavaTestOptions(option_parser):
       '-E', '--exclude-annotation', dest='exclude_annotation_str',
       help=('Comma-separated list of annotations. Exclude tests with these '
             'annotations.'))
-  option_parser.add_option('-j', '--java_only', action='store_true',
-                           default=False, help='Run only the Java tests.')
-  option_parser.add_option('-p', '--python_only', action='store_true',
-                           default=False,
-                           help='Run only the host-driven tests.')
   option_parser.add_option('--screenshot', dest='screenshot_failures',
                            action='store_true',
                            help='Capture screenshots of test failures')
   option_parser.add_option('--save-perf-json', action='store_true',
                            help='Saves the JSON file for each UI Perf test.')
   option_parser.add_option('--official-build', help='Run official build tests.')
-  option_parser.add_option('--python_test_root',
-                           help='Root of the host-driven tests.')
   option_parser.add_option('--keep_test_server_ports',
                            action='store_true',
                            help=('Indicates the test server ports must be '
@@ -220,19 +212,6 @@ def AddJavaTestOptions(option_parser):
 
 def ProcessJavaTestOptions(options, error_func):
   """Processes options/arguments and populates |options| with defaults."""
-  if options.java_only and options.python_only:
-    error_func('Options java_only (-j) and python_only (-p) '
-               'are mutually exclusive.')
-  options.run_java_tests = True
-  options.run_python_tests = True
-  if options.java_only:
-    options.run_python_tests = False
-  elif options.python_only:
-    options.run_java_tests = False
-
-  if not options.python_test_root:
-    options.run_python_tests = False
-
   if options.annotation_str:
     options.annotations = options.annotation_str.split(',')
   elif options.test_filter:
@@ -262,6 +241,13 @@ def AddInstrumentationTestOptions(option_parser):
   AddJavaTestOptions(option_parser)
   AddCommonOptions(option_parser)
 
+  option_parser.add_option('-j', '--java_only', action='store_true',
+                           default=False, help='Run only the Java tests.')
+  option_parser.add_option('-p', '--python_only', action='store_true',
+                           default=False,
+                           help='Run only the host-driven tests.')
+  option_parser.add_option('--python_test_root',
+                           help='Root of the host-driven tests.')
   option_parser.add_option('-w', '--wait_debugger', dest='wait_for_debugger',
                            action='store_true',
                            help='Wait for debugger.')
@@ -279,6 +265,19 @@ def ProcessInstrumentationOptions(options, error_func):
   ProcessJavaTestOptions(options, error_func)
 
+  if options.java_only and options.python_only:
+    error_func('Options java_only (-j) and python_only (-p) '
+               'are mutually exclusive.')
+  options.run_java_tests = True
+  options.run_python_tests = True
+  if options.java_only:
+    options.run_python_tests = False
+  elif options.python_only:
+    options.run_java_tests = False
+
+  if not options.python_test_root:
+    options.run_python_tests = False
+
   if not options.test_apk:
     error_func('--test-apk must be specified.')
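The -j/-p handling above is the block deleted from ProcessJavaTestOptions earlier in the patch, moved here verbatim because only instrumentation runs have a host-driven counterpart. Below is a minimal, self-contained sketch of the same mutual-exclusion pattern, runnable outside pylib; the flag names mirror the patch, but the parser wiring and __main__ block are illustrative additions, not part of the change.

# Standalone sketch of the option handling moved in the hunk above.
import optparse


def ProcessOptions(options, error_func):
  if options.java_only and options.python_only:
    error_func('Options java_only (-j) and python_only (-p) '
               'are mutually exclusive.')
  # Both suites run by default; each flag switches the other suite off.
  options.run_java_tests = not options.python_only
  options.run_python_tests = not options.java_only
  # Host-driven tests cannot be discovered without a root directory.
  if not options.python_test_root:
    options.run_python_tests = False


if __name__ == '__main__':
  parser = optparse.OptionParser()
  parser.add_option('-j', '--java_only', action='store_true', default=False)
  parser.add_option('-p', '--python_only', action='store_true', default=False)
  parser.add_option('--python_test_root')
  options, _ = parser.parse_args()
  ProcessOptions(options, parser.error)  # parser.error prints and exits
  print('java: %s, python: %s' % (options.run_java_tests,
                                  options.run_python_tests))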
@@ -434,8 +433,22 @@ def _RunInstrumentationTests(options, error_func):
     results.AddTestRunResults(test_results)
 
   if options.run_python_tests:
-    test_results, test_exit_code = (
-        python_dispatch.DispatchPythonTests(options))
+    runner_factory, tests = host_driven_setup.InstrumentationSetup(
+        options.python_test_root, options.official_build, options.annotations,
+        options.exclude_annotations, options.test_filter, options.tool,
+        options.build_type, options.push_deps, options.cleanup_test_files,
+        options.test_apk_path, options.test_apk_jar_path, options.test_data,
+        options.install_apk, options.save_perf_json,
+        options.screenshot_failures, options.wait_for_debugger,
+        options.disable_assertions)
+
+    test_results, test_exit_code = test_dispatcher.RunTests(
+        tests, runner_factory, False,
+        options.test_device,
+        shard=True,
+        build_type=options.build_type,
+        test_timeout=None,
+        num_retries=options.num_retries)
 
     results.AddTestRunResults(test_results)
 
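This hunk retires python_dispatch.DispatchPythonTests in favor of the contract the other suites already use: a setup function returns a (runner_factory, tests) pair, and test_dispatcher.RunTests shards the tests across devices, creating one runner per shard through the factory. A toy, self-contained sketch of that contract follows; the class, the test list, and the dispatcher body are stand-ins, not pylib's real implementations.

# Stand-in sketch of the (runner_factory, tests) contract used above.
class FakeTestRunner(object):
  """Stand-in for a pylib test runner; one is created per device shard."""

  def __init__(self, device, shard_index):
    self.device = device
    self.shard_index = shard_index

  def RunTest(self, test):
    print('[%s/shard %d] %s' % (self.device, self.shard_index, test))
    return 0  # stand-in for a real result/exit code


def Setup(test_root):
  """Mirrors the setup modules: returns (runner_factory, tests)."""
  tests = ['ExampleTest.testFoo', 'ExampleTest.testBar']  # stand-in discovery

  def TestRunnerFactory(device, shard_index):
    return FakeTestRunner(device, shard_index)

  return TestRunnerFactory, tests


def RunTests(tests, runner_factory, devices):
  """Greatly simplified stand-in for test_dispatcher.RunTests."""
  exit_code = 0
  for i, test in enumerate(tests):
    shard = i % len(devices)
    runner = runner_factory(devices[shard], shard)
    exit_code |= runner.RunTest(test)
  return exit_code


if __name__ == '__main__':
  runner_factory, tests = Setup('stub/test/root')
  RunTests(tests, runner_factory, devices=['emulator-5554'])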
@@ -461,33 +474,20 @@ def _RunUIAutomatorTests(options, error_func):
   results = base_test_result.TestRunResults()
   exit_code = 0
 
-  if options.run_java_tests:
-    runner_factory, tests = uiautomator_setup.Setup(
-        options.uiautomator_jar, options.uiautomator_info_jar,
-        options.annotations, options.exclude_annotations, options.test_filter,
-        options.package_name, options.build_type, options.test_data,
-        options.save_perf_json, options.screenshot_failures, options.tool,
-        options.disable_assertions, options.push_deps,
-        options.cleanup_test_files)
+  runner_factory, tests = uiautomator_setup.Setup(
+      options.uiautomator_jar, options.uiautomator_info_jar,
+      options.annotations, options.exclude_annotations, options.test_filter,
+      options.package_name, options.build_type, options.test_data,
+      options.save_perf_json, options.screenshot_failures, options.tool,
+      options.disable_assertions, options.push_deps,
+      options.cleanup_test_files)
 
-    test_results, exit_code = test_dispatcher.RunTests(
-        tests, runner_factory, False, options.test_device,
-        shard=True,
-        build_type=options.build_type,
-        test_timeout=None,
-        num_retries=options.num_retries)
-
-    results.AddTestRunResults(test_results)
-
-  if options.run_python_tests:
-    test_results, test_exit_code = (
-        python_dispatch.DispatchPythonTests(options))
-
-    results.AddTestRunResults(test_results)
-
-    # Only allow exit code escalation
-    if test_exit_code and exit_code != constants.ERROR_EXIT_CODE:
-      exit_code = test_exit_code
+  results, exit_code = test_dispatcher.RunTests(
+      tests, runner_factory, False, options.test_device,
+      shard=True,
+      build_type=options.build_type,
+      test_timeout=None,
+      num_retries=options.num_retries)
 
   report_results.LogFull(
       results=results,
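With the Java/Python split gone from this path, the uiautomator suite now runs unconditionally, and results/exit_code are rebound directly from the dispatcher rather than merged in via AddTestRunResults; note this appears to leave the initial base_test_result.TestRunResults() assignment above effectively dead on this path. The deleted "exit code escalation" guard is worth isolating for reference; below it is a standalone function, with ERROR_EXIT_CODE as an arbitrary stand-in for pylib's constants.ERROR_EXIT_CODE.

# The escalation rule removed above: a later nonzero exit code may replace
# a zero one, but a hard ERROR_EXIT_CODE already recorded is never
# overwritten. The value 2 is a stand-in, not pylib's actual constant.
ERROR_EXIT_CODE = 2


def EscalateExitCode(exit_code, test_exit_code):
  if test_exit_code and exit_code != ERROR_EXIT_CODE:
    exit_code = test_exit_code
  return exit_code


assert EscalateExitCode(0, 1) == 1                              # escalates
assert EscalateExitCode(1, 0) == 1                              # kept
assert EscalateExitCode(ERROR_EXIT_CODE, 1) == ERROR_EXIT_CODE  # not replaced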
@@ -536,8 +536,6 @@ def RunTestsCommand(command, options, args, option_parser):
   else:
     raise Exception('Unknown test type.')
 
-  return exit_code
-
 
 def HelpCommand(command, options, args, option_parser):
   """Display help for a certain command, or overall help.