Chromium Code Reviews
Index: build/android/test_runner.py

diff --git a/build/android/test_runner.py b/build/android/test_runner.py
index 08a0be33ada010a99c2a48c4173f7c0adaf9bac6..7a5f17f3c78dd6501885261178517f11e50ffebe 100755
--- a/build/android/test_runner.py
+++ b/build/android/test_runner.py
@@ -19,12 +19,15 @@ from pylib import cmd_helper
 from pylib import constants
 from pylib import ports
 from pylib.base import base_test_result
-from pylib.browsertests import dispatch as browsertests_dispatch
-from pylib.gtest import dispatch as gtest_dispatch
+from pylib.base import dispatch
+from pylib.browsertests import setup as browsertests_setup
+from pylib.gtest import setup as gtest_setup
+from pylib.gtest import gtest_config
 from pylib.host_driven import run_python_tests as python_dispatch
-from pylib.instrumentation import dispatch as instrumentation_dispatch
-from pylib.uiautomator import dispatch as uiautomator_dispatch
-from pylib.utils import emulator, report_results, run_tests_helper
+from pylib.instrumentation import setup as instrumentation_setup
+from pylib.uiautomator import setup as uiautomator_setup
+from pylib.utils import report_results
+from pylib.utils import run_tests_helper
 
 _SDK_OUT_DIR = os.path.join(constants.DIR_SOURCE_ROOT, 'out')
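The overall shape of the change: each per-framework dispatch module becomes a setup module, and sharding moves into the shared pylib.base.dispatch. Every Setup() hands back a (runner_factory, tests) pair that dispatch.ShardAndRunTests consumes, as the call sites below show. A minimal sketch of that contract, with illustrative names (ExampleSetup and the dict-valued runner are stand-ins, not the real pylib API):

    # Sketch of the setup/dispatch contract; ExampleSetup and the dict runner
    # are illustrative stand-ins. The real factories build framework-specific
    # TestRunner objects.
    def ExampleSetup(test_names):
      """Returns the (runner_factory, tests) pair the dispatcher expects."""
      def runner_factory(device, shard_index):
        return {'device': device, 'shard': shard_index}
      return runner_factory, list(test_names)

    runner_factory, tests = ExampleSetup(['FooTest.Bar', 'FooTest.Baz'])
    print runner_factory('emulator-5554', 0)  # e.g. {'device': 'emulator-5554', 'shard': 0}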
@@ -45,29 +48,7 @@ def AddBuildTypeOption(option_parser):
                                  ' Default is env var BUILDTYPE or Debug.'))
 
 
-def AddEmulatorOptions(option_parser):
-  """Adds all emulator-related options to |option_parser|."""
-
-  # TODO(gkanwar): Figure out what we're doing with the emulator setup
-  # and determine whether these options should be deprecated/removed.
-  option_parser.add_option('-e', '--emulator', dest='use_emulator',
-                           action='store_true',
-                           help='Run tests in a new instance of emulator.')
-  option_parser.add_option('-n', '--emulator-count',
-                           type='int', default=1,
-                           help=('Number of emulators to launch for '
-                                 'running the tests.'))
-  option_parser.add_option('--abi', default='armeabi-v7a',
-                           help='Platform of emulators to launch.')
-
-
-def ProcessEmulatorOptions(options):
-  """Processes emulator options."""
-  if options.use_emulator:
-    emulator.DeleteAllTempAVDs()
-
-
-def AddCommonOptions(option_parser):
+def AddCommonOptions(option_parser, default_timeout=60):
   """Adds all common options to |option_parser|."""
 
   AddBuildTypeOption(option_parser)
@@ -111,6 +92,10 @@ def AddCommonOptions(option_parser):
   option_parser.add_option('-d', '--device', dest='test_device',
                            help=('Target device for the test suite '
                                  'to run on.'))
+  option_parser.add_option('-t', dest='timeout',
+                           help='Timeout to wait for each test',
+                           type='int',
+                           default=default_timeout)
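With the move into AddCommonOptions, every test type gets the -t flag, and callers pick the default via default_timeout. A standalone optparse sketch of the flag's behavior (parser setup simplified from the real AddCommonOptions):

    import optparse

    # Simplified stand-in for AddCommonOptions; only the -t flag is shown.
    parser = optparse.OptionParser()
    parser.add_option('-t', dest='timeout', type='int', default=60,
                      help='Timeout to wait for each test')
    options, _ = parser.parse_args(['-t', '120'])
    assert options.timeout == 120  # an explicit flag overrides the default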
 def ProcessCommonOptions(options):
@@ -120,20 +105,15 @@ def ProcessCommonOptions(options):
   run_tests_helper.SetLogLevel(options.verbose_count)
 
 
-def AddCoreGTestOptions(option_parser, default_timeout=60):
+def AddCoreGTestOptions(option_parser):
   """Add options specific to the gtest framework to |option_parser|."""
 
   # TODO(gkanwar): Consolidate and clean up test filtering for gtests and
   # content_browsertests.
-  option_parser.add_option('--gtest_filter', dest='test_filter',
+  option_parser.add_option('--gtest_filter', dest='gtest_filter',
                            help='Filter GTests by name.')
   option_parser.add_option('-a', '--test_arguments', dest='test_arguments',
                            help='Additional arguments to pass to the test.')
-  # TODO(gkanwar): Most likely deprecate/remove this option once we've pinned
-  # down what we're doing with the emulator setup.
-  option_parser.add_option('-x', '--xvfb', dest='use_xvfb',
-                           action='store_true',
-                           help='Use Xvfb around tests (ignored if not Linux).')
   # TODO(gkanwar): Possibly deprecate this flag. Waiting on word from Peter
   # Beverloo.
   option_parser.add_option('--webkit', action='store_true',
@@ -141,10 +121,6 @@ def AddCoreGTestOptions(option_parser, default_timeout=60):
   option_parser.add_option('--exe', action='store_true',
                            help='If set, use the exe test runner instead of '
                                 'the APK.')
-  option_parser.add_option('-t', dest='timeout',
-                           help='Timeout to wait for each test',
-                           type='int',
-                           default=default_timeout)
 
 
 def AddContentBrowserTestOptions(option_parser):
@@ -165,16 +141,33 @@ def AddGTestOptions(option_parser):
   option_parser.command_list = []
   option_parser.example = '%prog gtest -s base_unittests'
 
+  # TODO(gkanwar): Make this option required
frankf (2013/07/16 00:02:18):
I was thinking that content_browsertests should ju

gkanwar (2013/07/16 00:47:03):
That seems reasonable. I think there's some amount
   option_parser.add_option('-s', '--suite', dest='test_suite',
                            help=('Executable name of the test suite to run '
                                  '(use -s help to list them).'))
   AddCoreGTestOptions(option_parser)
   # TODO(gkanwar): Move these to Common Options once we have the plumbing
   # in our other test types to handle these commands
-  AddEmulatorOptions(option_parser)
   AddCommonOptions(option_parser)
 
 
+def ProcessGTestOptions(options):
+  """Intercept test suite help to list test suites.
+
+  Args:
+    options: command line options.
+
+  Returns:
+    True if the command should continue.
+  """
+  if options.test_suite == 'help':
+    print 'Available test suites are:'
+    for test_suite in gtest_config.STABLE_TEST_SUITES:
+      print test_suite
+    return False
+  return True
+
+
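ProcessGTestOptions turns '-s help' into a suite listing instead of a test run; returning False tells the caller to exit cleanly. A self-contained sketch of that flow (ProcessGTestOptionsSketch is hypothetical, and the suite names are placeholders for gtest_config.STABLE_TEST_SUITES):

    STABLE_TEST_SUITES = ['base_unittests', 'net_unittests']  # placeholder values

    def ProcessGTestOptionsSketch(test_suite):
      if test_suite == 'help':
        print 'Available test suites are:'
        for suite in STABLE_TEST_SUITES:
          print suite
        return False  # the caller maps this to exit code 0
      return True

    assert not ProcessGTestOptionsSketch('help')
    assert ProcessGTestOptionsSketch('base_unittests')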
 def AddJavaTestOptions(option_parser):
   """Adds the Java test options to |option_parser|."""
@@ -201,10 +194,6 @@ def AddJavaTestOptions(option_parser):
                            help='Capture screenshots of test failures')
   option_parser.add_option('--save-perf-json', action='store_true',
                            help='Saves the JSON file for each UI Perf test.')
-  # TODO(gkanwar): Remove this option. It is not used anywhere.
-  option_parser.add_option('--shard_retries', type=int, default=1,
-                           help=('Number of times to retry each failure when '
-                                 'sharding.'))
   option_parser.add_option('--official-build', help='Run official build tests.')
   option_parser.add_option('--python_test_root',
                            help='Root of the host-driven tests.')
@@ -351,6 +340,151 @@ def ProcessUIAutomatorOptions(options, error_func):
                            '_java.jar')
 
 
+def _RunGTests(options, error_func):
+  """Subcommand of RunTestsCommand which runs gtests."""
+  if not ProcessGTestOptions(options):
+    return 0
+
+  tests_dict = gtest_setup.Setup(
+      options.exe, options.test_suite, options.test_arguments,
+      options.timeout, options.cleanup_test_files, options.tool,
+      options.build_type, options.webkit, options.push_deps,
+      options.gtest_filter)
+
+  exit_code = 0
+  results = base_test_result.TestRunResults()
+  for suite_name in tests_dict:
+    runner_factory, tests = tests_dict[suite_name]
+    test_results, test_exit_code = dispatch.ShardAndRunTests(
+        tests, False, options.test_device, runner_factory,
+        build_type=options.build_type,
+        test_timeout=options.timeout,
+        num_retries=options.num_retries)
+    results.AddTestRunResults(test_results)
+    if test_exit_code and exit_code != constants.ERROR_EXIT_CODE:
+      exit_code = test_exit_code
+
+  report_results.LogFull(
+      results=results,
+      test_type='Unit test',
+      test_package=options.test_suite,
+      build_type=options.build_type,
+      flakiness_server=options.flakiness_dashboard_server)
+
+  return exit_code
+
+
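_RunGTests runs one ShardAndRunTests pass per suite and merges exit codes under the "only allow exit code escalation" rule seen above. Isolated into a helper for clarity (EscalateExitCode is hypothetical; ERROR_EXIT_CODE stands in for constants.ERROR_EXIT_CODE):

    ERROR_EXIT_CODE = 1  # stand-in for constants.ERROR_EXIT_CODE

    def EscalateExitCode(exit_code, test_exit_code):
      # A later failure may raise the exit code, but an already-recorded
      # ERROR_EXIT_CODE is never overwritten.
      if test_exit_code and exit_code != ERROR_EXIT_CODE:
        return test_exit_code
      return exit_code

    assert EscalateExitCode(0, 0) == 0
    assert EscalateExitCode(0, 2) == 2
    assert EscalateExitCode(ERROR_EXIT_CODE, 2) == ERROR_EXIT_CODE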
+def _RunContentBrowserTests(options, error_func):
+  """Subcommand of RunTestsCommand which runs content_browsertests."""
+  runner_factory, tests = browsertests_setup.Setup(
+      options.test_arguments, options.timeout, options.cleanup_test_files,
+      options.tool, options.build_type, options.webkit, options.push_deps,
+      options.gtest_filter)
+
+  # TODO(nileshagrawal): remove this abnormally long setup timeout once fewer
+  # files are pushed to the devices for content_browsertests: crbug.com/138275
+  setup_timeout = 20 * 60  # 20 minutes
+  results, exit_code = dispatch.ShardAndRunTests(
+      tests, False, options.test_device, runner_factory,
+      build_type=options.build_type,
+      test_timeout=options.timeout,
+      setup_timeout=setup_timeout,
+      num_retries=options.num_retries)
+
+  report_results.LogFull(
+      results=results,
+      test_type='Unit test',
+      test_package=constants.BROWSERTEST_SUITE_NAME,
+      build_type=options.build_type,
+      flakiness_server=options.flakiness_dashboard_server)
+
+  return exit_code
+
+
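For readers without the pylib source handy: ShardAndRunTests deals the test list out across attached devices and runs each shard through a runner built by runner_factory. A heavily simplified sketch (round-robin sharding assumed; ShardAndRunSketch and RunTest are placeholders, and the real dispatcher also handles retries, per-test timeouts, and result aggregation):

    def ShardAndRunSketch(tests, devices, runner_factory):
      results = []
      for shard_index, device in enumerate(devices):
        shard = tests[shard_index::len(devices)]  # round-robin assignment
        runner = runner_factory(device, shard_index)
        for test in shard:
          # RunTest is a placeholder for whatever the framework runner exposes.
          results.append(runner.RunTest(test))
      return results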
+def _RunInstrumentationTests(options, error_func):
+  """Subcommand of RunTestsCommand which runs instrumentation tests."""
+  ProcessInstrumentationOptions(options, error_func)
+
+  results = base_test_result.TestRunResults()
+  exit_code = 0
+
+  if options.run_java_tests:
+    runner_factory, tests = instrumentation_setup.Setup(
+        options.test_apk_path, options.test_apk_jar_path, options.annotations,
+        options.exclude_annotations, options.test_filter, options.build_type,
+        options.test_data, options.install_apk, options.save_perf_json,
+        options.screenshot_failures, options.tool, options.wait_for_debugger,
+        options.disable_assertions, options.push_deps)
+
+    test_results, exit_code = dispatch.ShardAndRunTests(
+        tests, options.wait_for_debugger, options.test_device, runner_factory,
+        build_type=options.build_type,
+        test_timeout=options.timeout,
+        num_retries=options.num_retries)
+
+    results.AddTestRunResults(test_results)
+
+  if options.run_python_tests:
+    test_results, test_exit_code = (python_dispatch.
+                                    DispatchPythonTests(options))
+    results.AddTestRunResults(test_results)
+    # Only allow exit code escalation
+    if test_exit_code and exit_code != constants.ERROR_EXIT_CODE:
+      exit_code = test_exit_code
+
+  report_results.LogFull(
+      results=results,
+      test_type='Instrumentation',
+      test_package=os.path.basename(options.test_apk),
+      annotation=options.annotations,
+      build_type=options.build_type,
+      flakiness_server=options.flakiness_dashboard_server)
+
+  return exit_code
+
+
+def _RunUIAutomatorTests(options, error_func):
+  """Subcommand of RunTestsCommand which runs uiautomator tests."""
+  ProcessUIAutomatorOptions(options, error_func)
+
+  results = base_test_result.TestRunResults()
+  exit_code = 0
+
+  if options.run_java_tests:
+    runner_factory, tests = uiautomator_setup.Setup(
+        options.uiautomator_jar, options.uiautomator_info_jar,
+        options.annotations, options.exclude_annotations, options.test_filter,
+        options.package_name, options.build_type, options.test_data,
+        options.save_perf_json, options.screenshot_failures, options.tool,
+        options.disable_assertions, options.push_deps)
+
+    test_results, exit_code = dispatch.ShardAndRunTests(
+        tests, False, options.test_device, runner_factory,
+        build_type=options.build_type,
+        test_timeout=options.timeout,
+        num_retries=options.num_retries)
+
+    results.AddTestRunResults(test_results)
+
+  if options.run_python_tests:
+    test_results, test_exit_code = (python_dispatch.
+                                    DispatchPythonTests(options))
+    results.AddTestRunResults(test_results)
+    # Only allow exit code escalation
+    if test_exit_code and exit_code != constants.ERROR_EXIT_CODE:
+      exit_code = test_exit_code
+
+  report_results.LogFull(
+      results=results,
+      test_type='UIAutomator',
+      test_package=os.path.basename(options.test_jar),
+      annotation=options.annotations,
+      build_type=options.build_type,
+      flakiness_server=options.flakiness_dashboard_server)
+
+  return exit_code
+
+
 def RunTestsCommand(command, options, args, option_parser):
   """Checks test type and dispatches to the appropriate function.
@@ -372,54 +506,13 @@ def RunTestsCommand(command, options, args, option_parser):
   ProcessCommonOptions(options)
 
   if command == 'gtest':
-    # TODO(gkanwar): See the emulator TODO above -- this call should either go
-    # away or become generalized.
-    ProcessEmulatorOptions(options)
-    results, exit_code = gtest_dispatch.Dispatch(options)
+    return _RunGTests(options, option_parser.error)
   elif command == 'content_browsertests':
-    results, exit_code = browsertests_dispatch.Dispatch(options)
+    return _RunContentBrowserTests(options, option_parser.error)
   elif command == 'instrumentation':
-    ProcessInstrumentationOptions(options, option_parser.error)
-    results = base_test_result.TestRunResults()
-    exit_code = 0
-    if options.run_java_tests:
-      test_results, exit_code = instrumentation_dispatch.Dispatch(options)
-      results.AddTestRunResults(test_results)
-    if options.run_python_tests:
-      test_results, test_exit_code = (python_dispatch.
-                                      DispatchPythonTests(options))
-      results.AddTestRunResults(test_results)
-      # Only allow exit code escalation
-      if test_exit_code and exit_code != constants.ERROR_EXIT_CODE:
-        exit_code = test_exit_code
-    report_results.LogFull(
-        results=results,
-        test_type='Instrumentation',
-        test_package=os.path.basename(options.test_apk),
-        annotation=options.annotations,
-        build_type=options.build_type,
-        flakiness_server=options.flakiness_dashboard_server)
+    return _RunInstrumentationTests(options, option_parser.error)
   elif command == 'uiautomator':
-    ProcessUIAutomatorOptions(options, option_parser.error)
-    results = base_test_result.TestRunResults()
-    exit_code = 0
-    if options.run_java_tests:
-      test_results, exit_code = uiautomator_dispatch.Dispatch(options)
-      results.AddTestRunResults(test_results)
-    if options.run_python_tests:
-      test_results, test_exit_code = (python_dispatch.
-                                      DispatchPythonTests(options))
-      results.AddTestRunResults(test_results)
-      # Only allow exit code escalation
-      if test_exit_code and exit_code != constants.ERROR_EXIT_CODE:
-        exit_code = test_exit_code
-    report_results.LogFull(
-        results=results,
-        test_type='UIAutomator',
-        test_package=os.path.basename(options.test_jar),
-        annotation=options.annotations,
-        build_type=options.build_type,
-        flakiness_server=options.flakiness_dashboard_server)
+    return _RunUIAutomatorTests(options, option_parser.error)
   else:
     raise Exception('Unknown test type state')
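With each branch reduced to a single helper call, the chain could equally be table-driven; a sketch using the helpers introduced above (RunTestsCommandSketch and _COMMAND_HANDLERS are illustrative, not part of the CL):

    # Assumes the _Run* helpers defined earlier in this file.
    _COMMAND_HANDLERS = {
        'gtest': _RunGTests,
        'content_browsertests': _RunContentBrowserTests,
        'instrumentation': _RunInstrumentationTests,
        'uiautomator': _RunUIAutomatorTests,
    }

    def RunTestsCommandSketch(command, options, option_parser):
      handler = _COMMAND_HANDLERS.get(command)
      if handler is None:
        raise Exception('Unknown test type state')
      return handler(options, option_parser.error)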