Index: build/android/run_all_tests.py |
diff --git a/build/android/run_all_tests.py b/build/android/run_all_tests.py |
new file mode 100755 |
index 0000000000000000000000000000000000000000..6787c4d0d49fdd6e3e5ab1ccf7434ede0ff5a590 |
--- /dev/null |
+++ b/build/android/run_all_tests.py |
@@ -0,0 +1,415 @@ |
+#!/usr/bin/env python |
+# |
+# Copyright (c) 2013 The Chromium Authors. All rights reserved. |
+# Use of this source code is governed by a BSD-style license that can be |
+# found in the LICENSE file. |
+ |
+"""Runs all types of tests from one unified interface. |
+ |
+Types of tests supported: |
+1. GTest native unit tests (gtests) |
+ Example: ./run_all_tests.py gtests -s android_webview_unittests |
frankf
2013/06/13 23:17:48
It'd be good to give such canonical examples on --
gkanwar
2013/06/17 21:04:43
Done.
|
+2. ContentBrowser unit tests (content_browsertests) |
+ Example: ./adb_install_apk.py --apk=ContentShell.apk |
+ ./run_all_tests.py content_browsertests --test-apk=ContentShellTest |
frankf
2013/06/13 23:17:48
Why does this take --test-apk. These tests don't t
gkanwar
2013/06/17 21:04:43
Done.
|
+3. Instrumentation tests (instrumentationtests): Both Python host-driven and |
+ Java instrumentation tests are run by default. Use --python_only or |
+ --java_only to select one or the other. |
+ Example: ./adb_install_apk.py --apk=ChromiumTestShellTest.apk |
frankf
2013/06/13 23:17:48
this is already done by -I option. Please update t
gkanwar
2013/06/17 21:04:43
Done.
|
+ ./run_all_tests.py instrumentationtests |
+ --test-apk=ChromiumTestShellTest |
+4. UIAutomator tests (uiautomatortests): Both Python host-driven and Java |
+ UIAutomator tests are run by default. Use --python_only or --java_only to |
+ select one or the other. |
+ Example: ./run_all_tests.py uiautomatortests |
+ --test-jar=chromium_testshell_uiautomator_tests |
+ --package-name=org.chromium.chrome.testshell |
+ |
+TODO(gkanwar): |
+* Incorporate the functionality of adb_install_apk.py to allow |
+ installing the APK for a test in the same command as running the test. |
+* Add options to run Monkey tests. |
+""" |
+ |
+import optparse |
+import os |
+import sys |
+ |
+from pylib import cmd_helper |
+from pylib import constants |
+from pylib import ports |
+from pylib.browsertests import dispatch as browsertests_dispatch |
+from pylib.gtest import dispatch as gtest_dispatch |
+from pylib.host_driven import run_python_tests as python_dispatch |
+from pylib.instrumentation import dispatch as instrumentation_dispatch |
+from pylib.uiautomator import dispatch as uiautomator_dispatch |
+from pylib.utils import emulator |
+from pylib.utils import run_tests_helper |
+ |
+_SDK_OUT_DIR = os.path.join(constants.DIR_SOURCE_ROOT, 'out') |
+VALID_TEST_TYPES = ["gtests", "content_browsertests", "instrumentationtests", |
frankf
2013/06/13 23:17:48
Use single quotes consistently. Please run gpylint
gkanwar
2013/06/17 21:04:43
Done.
|
+ "uiautomatortests"] |
frankf
2013/06/13 23:17:48
instrumentationtests -> instrumentation
uiautomato
gkanwar
2013/06/17 21:04:43
Done.
|
+ |
+ |
+def ValidateTestTypeArg(options, args, option_parser): |
+ """Validates that the first arg is a valid test type keyword.""" |
+ if len(args) < 2: |
+ option_parser.error("You must specify a test type.") |
frankf
2013/06/13 23:17:48
List the test types here
frankf
2013/06/13 23:17:48
Perhaps only pass in the error method as a paramet
gkanwar
2013/06/17 21:04:43
Test types -- Done.
gkanwar
2013/06/17 21:04:43
Error method -- good idea, changed.
|
+ if args[1] not in VALID_TEST_TYPES: |
+ option_parser.error("Invalid test type. The test type must be one of: " + |
+ ', '.join(VALID_TEST_TYPES)) |
+ options.test_type = args[1] |
+ |
+ |
+def AddBuildTypeOption(option_container): |
+ """Adds the build type option to the OptionContainer.""" |
+ default_build_type = 'Debug' |
+ if 'BUILDTYPE' in os.environ: |
+ default_build_type = os.environ['BUILDTYPE'] |
+ option_container.add_option('--debug', action='store_const', const='Debug', |
+ dest='build_type', default=default_build_type, |
+ help=('If set, run test suites under out/Debug. ' |
+ 'Default is env var BUILDTYPE or Debug.')) |
+ option_container.add_option('--release', action='store_const', |
+ const='Release', dest='build_type', |
+ help=('If set, run test suites under out/Release.' |
+ ' Default is env var BUILDTYPE or Debug.')) |
+ |
+ |
+def AddDeviceOptions(option_container): |
+ """Adds all device-related options to the OptionContainer.""" |
+ |
+ option_container.add_option('-d', '--device', dest='test_device', |
+ help=('Target device for the test suite ' |
+ 'to run on.')) |
+ option_container.add_option('-e', '--emulator', dest='use_emulator', |
frankf
2013/06/13 23:17:48
You've moved options such as this to common, but t
gkanwar
2013/06/17 21:04:43
Done.
|
+ action='store_true', |
+ help='Run tests in a new instance of emulator.') |
+ option_container.add_option('-n', '--emulator-count', |
+ type='int', default=1, |
+ help=('Number of emulators to launch for ' |
+ 'running the tests.')) |
+ option_container.add_option('--abi', default='armeabi-v7a', |
+ help='Platform of emulators to launch.') |
+ |
+ |
+def ProcessDeviceOptions(options): |
+ """Processes emulator and device options.""" |
+ if options.use_emulator: |
+ emulator.DeleteAllTempAVDs() |
+ |
+ |
+def AddCommonOptions(option_parser, default_timeout=60): |
+ """Adds all common options in an OptionGroup to the OptionParser.""" |
+ |
+ option_group = optparse.OptionGroup(option_parser, "Common Options", |
+ "Options that apply to all test types.") |
+ |
+ AddBuildTypeOption(option_group) |
+ AddDeviceOptions(option_group) |
+ |
+ # --gtest_filter is DEPRECATED. Added for backwards compatibility |
+ # with the syntax of the old run_tests.py script. |
+ option_group.add_option('-f', '--test_filter', '--gtest_filter', |
+ dest='test_filter', |
+ help=('Test filter (if not fully qualified, ' |
+ 'will run all matches).')) |
+ option_group.add_option('--out-directory', dest='out_directory', |
+ help=('Path to the out/ directory, irrespective of ' |
+ 'the build type. Only for non-Chromium uses.')) |
+ option_group.add_option('-t', dest='timeout', |
+ help='Timeout to wait for each test', |
+ type='int', |
+ default=default_timeout) |
+ option_group.add_option('-c', dest='cleanup_test_files', |
+ help='Cleanup test files on the device after run', |
+ action='store_true') |
+ option_group.add_option('--num_retries', dest='num_retries', type='int', |
+ default=2, |
+ help=('Number of retries for a test before ' |
+ 'giving up.')) |
+ option_group.add_option('-v', |
+ '--verbose', |
+ dest='verbose_count', |
+ default=0, |
+ action='count', |
+ help='Verbose level (multiple times for more)') |
+ profilers = ['devicestatsmonitor', 'chrometrace', 'dumpheap', 'smaps', |
+ 'traceview'] |
+ option_group.add_option('--profiler', dest='profilers', action='append', |
+ choices=profilers, |
+ help=('Profiling tool to run during test. Pass ' |
+ 'multiple times to run multiple profilers. ' |
+ 'Available profilers: %s' % profilers)) |
+ option_group.add_option('--tool', |
+ dest='tool', |
+ help=('Run the test under a tool ' |
+ '(use --tool help to list them)')) |
+ option_group.add_option('--flakiness-dashboard-server', |
+ dest='flakiness_dashboard_server', |
+ help=('Address of the server that is hosting the ' |
+ 'Chrome for Android flakiness dashboard.')) |
+ option_group.add_option('--skip-deps-push', dest='push_deps', |
+ action='store_false', default=True, |
+ help=('Do not push dependencies to the device. ' |
+ 'Use this at own risk for speeding up test ' |
+ 'execution on local machine.')) |
+ option_group.add_option('--exit-code', action='store_true', |
+ help=('If set, the exit code will be total number ' |
+ 'of failures.')) |
+ option_group.add_option('--buildbot-step-failure', |
+ action='store_true', |
+ help=('If present, will set the buildbot status ' |
+ 'as STEP_FAILURE, otherwise as STEP_WARNINGS ' |
+ 'when test(s) fail.')) |
+ option_parser.add_option_group(option_group) |
+ |
+ |
+def ProcessCommonOptions(options): |
+ """Processes and handles all common options.""" |
+ if options.out_directory: |
+ cmd_helper.OutDirectory.set(options.out_directory) |
+ run_tests_helper.SetLogLevel(options.verbose_count) |
+ |
+ |
+def AddGTestTestOptions(option_parser): |
frankf
2013/06/13 23:17:48
second Test is redundant
gkanwar
2013/06/17 21:04:43
Done.
|
+ """Adds gtest options in an OptionGroup to the OptionParser""" |
frankf
2013/06/13 23:17:48
Rephrase this. e.g. Add gtest options to |option_p
gkanwar
2013/06/17 21:04:43
Done.
|
+ |
+ option_group = optparse.OptionGroup(option_parser, |
+ 'GTest Options', |
+ 'Use these options to choose which ' |
+ 'test suites to run and how.') |
+ |
+ option_group.add_option('-s', '--suite', dest='test_suite', |
+ help=('Executable name of the test suite to run ' |
+ '(use -s help to list them).')) |
+ option_group.add_option('-a', '--test_arguments', dest='test_arguments', |
+ help='Additional arguments to pass to the test.') |
+ option_group.add_option('-x', '--xvfb', dest='use_xvfb', |
+ action='store_true', |
+ help='Use Xvfb around tests (ignored if not Linux).') |
+ option_group.add_option('--webkit', action='store_true', |
+ help='Run the tests from a WebKit checkout.') |
+ option_group.add_option('--exe', action='store_true', |
+ help='If set, use the exe test runner instead of ' |
+ 'the APK.') |
+ |
+ option_parser.add_option_group(option_group) |
+ |
+ |
+def AddJavaTestOptions(option_parser): |
+ """Adds the Java test options in an OptionGroup to the OptionContainer.""" |
+ |
+ option_group = optparse.OptionGroup(option_parser, |
+ 'Java Test Options', |
+ 'Use these options to choose the details ' |
+ 'of which tests to run, and how to run ' |
frankf
2013/06/13 23:17:48
Not sure if this doc is conveying any information.
gkanwar
2013/06/17 21:04:43
Done.
|
+ 'them.') |
+ option_group.add_option( |
+ '-A', '--annotation', dest='annotation_str', |
+ help=('Comma-separated list of annotations. Run only tests with any of ' |
+ 'the given annotations. An annotation can be either a key or a ' |
+ 'key-values pair. A test that has no annotation is considered ' |
+ '"SmallTest".')) |
+ option_group.add_option( |
+ '-E', '--exclude-annotation', dest='exclude_annotation_str', |
+ help=('Comma-separated list of annotations. Exclude tests with these ' |
+ 'annotations.')) |
+ option_group.add_option('-j', '--java_only', action='store_true', |
+ default=False, help='Run only the Java tests.') |
+ option_group.add_option('-p', '--python_only', action='store_true', |
+ default=False, help='Run only the host-driven tests.') |
+ option_group.add_option('--screenshot', dest='screenshot_failures', |
+ action='store_true', |
+ help='Capture screenshots of test failures') |
+ option_group.add_option('--save-perf-json', action='store_true', |
+ help='Saves the JSON file for each UI Perf test.') |
+ option_group.add_option('--shard_retries', type=int, default=1, |
frankf
2013/06/13 23:17:48
I think this is not used anywhere and is specific
gkanwar
2013/06/17 21:04:43
Done.
|
+ help=('Number of times to retry each failure when ' |
+ 'sharding.')) |
+ option_group.add_option('--official-build', help='Run official build tests.') |
+ option_group.add_option('--python_test_root', |
+ help='Root of the host-driven tests.') |
+ option_group.add_option('--keep_test_server_ports', |
+ action='store_true', |
+ help=('Indicates the test server ports must be ' |
+ 'kept. When this is run via a sharder ' |
+ 'the test server ports should be kept and ' |
+ 'should not be reset.')) |
+ option_group.add_option('--disable_assertions', action='store_true', |
+ help='Run with java assertions disabled.') |
+ option_group.add_option('--test_data', action='append', default=[], |
+ help=('Each instance defines a directory of test ' |
+ 'data that should be copied to the target(s) ' |
+ 'before running the tests. The argument ' |
+ 'should be of the form <target>:<source>, ' |
+ '<target> is relative to the device data' |
+ 'directory, and <source> is relative to the ' |
+ 'chromium build directory.')) |
+ |
+ AddInstrumentationOptions(option_group) |
+ AddUIAutomatorOptions(option_group) |
frankf
2013/06/13 23:17:48
To avoid confusion, let's have separate OptionGrou
gkanwar
2013/06/17 21:04:43
Done.
|
+ |
+ option_parser.add_option_group(option_group) |
+ |
+ |
+def ValidateJavaTestOptions(options, option_parser): |
+ """Validates options/arguments and populates options with defaults.""" |
frankf
2013/06/13 23:17:48
Let's just call these Process instead of Validate
gkanwar
2013/06/17 21:04:43
Done.
|
+ |
+ if options.java_only and options.python_only: |
+ option_parser.error('Options java_only (-j) and python_only (-p) ' |
+ 'are mutually exclusive.') |
+ options.run_java_tests = True |
+ options.run_python_tests = True |
+ if options.java_only: |
+ options.run_python_tests = False |
+ elif options.python_only: |
+ options.run_java_tests = False |
+ |
+ if not options.python_test_root: |
+ options.run_python_tests = False |
+ |
+ if options.annotation_str: |
+ options.annotations = options.annotation_str.split(',') |
+ elif options.test_filter: |
+ options.annotations = [] |
+ else: |
+ options.annotations = ['Smoke', 'SmallTest', 'MediumTest', 'LargeTest'] |
+ |
+ if options.exclude_annotation_str: |
+ options.exclude_annotations = options.exclude_annotation_str.split(',') |
+ else: |
+ options.exclude_annotations = [] |
+ |
+ if not options.keep_test_server_ports: |
+ if not ports.ResetTestServerPortAllocation(): |
+ raise Exception('Failed to reset test server port.') |
+ |
+ |
+def AddInstrumentationOptions(option_container): |
+ """Adds java/python instrumentation test options to the OptionContainer.""" |
frankf
2013/06/13 23:17:48
remove "java/python"
frankf
2013/06/13 23:17:48
What's an OptionContainer, you mean OptionGroup?
gkanwar
2013/06/17 21:04:43
"java/python" -- Done.
gkanwar
2013/06/17 21:04:43
I used OptionContainer here because these could be
|
+ option_container.add_option('-w', '--wait_debugger', dest='wait_for_debugger', |
+ action='store_true', |
+ help='(Instrumentation only) Wait for debugger.') |
+ option_container.add_option('-I', dest='install_apk', action='store_true', |
+ help='(Instrumentation only) Install APK.') |
frankf
2013/06/13 23:17:48
"Install test APK"
gkanwar
2013/06/17 21:04:43
Done.
|
+ option_container.add_option( |
+ '--test-apk', dest='test_apk', |
+ help=('(Instrumentation only) The name of the apk containing the tests ' |
+ '(without the .apk extension; e.g. "ContentShellTest"). ' |
+ 'Alternatively, this can be a full path to the apk.')) |
+ |
+ |
+def ValidateInstrumentationOptions(options, option_parser): |
+ """Validates options/arguments and populate options with defaults.""" |
+ |
+ ValidateJavaTestOptions(options, option_parser) |
+ |
+ if not options.test_apk: |
+ option_parser.error('--test-apk must be specified.') |
+ |
+ if os.path.exists(options.test_apk): |
+ # The APK is fully qualified, assume the JAR lives along side. |
+ options.test_apk_path = options.test_apk |
+ options.test_apk_jar_path = (os.path.splitext(options.test_apk_path)[0] + |
+ '.jar') |
+ else: |
+ options.test_apk_path = os.path.join(_SDK_OUT_DIR, |
+ options.build_type, |
+ constants.SDK_BUILD_APKS_DIR, |
+ '%s.apk' % options.test_apk) |
+ options.test_apk_jar_path = os.path.join( |
+ _SDK_OUT_DIR, options.build_type, constants.SDK_BUILD_TEST_JAVALIB_DIR, |
+ '%s.jar' % options.test_apk) |
+ |
+ |
+def AddUIAutomatorOptions(option_container): |
+ """Adds UI Automator test options to the OptionContainer.""" |
+ option_container.add_option( |
+ '--package-name', |
+ help=('(UIAutomator only) The package name used by the apk ' |
+ 'containing the application.')) |
+ option_container.add_option( |
+ '--test-jar', dest='test_jar', |
+ help=('(UIAutomator only) The name of the dexed jar containing the tests ' |
+ '(without the .dex.jar extension). Alternatively, this can be a ' |
+ 'full path to the jar.')) |
+ |
+ |
+def ValidateUIAutomatorOptions(options, option_parser): |
+ """Validates UIAutomator options/arguments.""" |
+ |
+ ValidateJavaTestOptions(options, option_parser) |
+ |
+ if not options.package_name: |
+ option_parser.error('--package-name must be specified.') |
+ |
+ if not options.test_jar: |
+ option_parser.error('--test-jar must be specified.') |
+ |
+ if os.path.exists(options.test_jar): |
+ # The dexed JAR is fully qualified, assume the info JAR lives along side. |
+ options.uiautomator_jar = options.test_jar |
+ else: |
+ options.uiautomator_jar = os.path.join( |
+ _SDK_OUT_DIR, options.build_type, constants.SDK_BUILD_JAVALIB_DIR, |
+ '%s.dex.jar' % options.test_jar) |
+ options.uiautomator_info_jar = ( |
+ options.uiautomator_jar[:options.uiautomator_jar.find('.dex.jar')] + |
+ '_java.jar') |
+ |
+ |
+def RunTests(options, option_parser): |
+ """Checks test type and dispatches to the appropriate function.""" |
+ |
+ total_failed = 0 |
+ if options.test_type == "gtests": |
+ total_failed += gtest_dispatch.Dispatch(options) |
frankf
2013/06/13 23:17:48
No need to aggregate the count since we only run o
gkanwar
2013/06/17 21:04:43
Done.
|
+ elif options.test_type == "content_browsertests": |
+ total_failed += browsertests_dispatch.Dispatch(options) |
+ elif options.test_type == "instrumentationtests": |
+ ValidateInstrumentationOptions(options, option_parser) |
+ if options.run_python_tests: |
+ total_failed += python_dispatch.Dispatch(options) |
frankf
2013/06/13 23:17:48
This is run after instrumentation tests. Same belo
gkanwar
2013/06/17 21:04:43
Done.
|
+ if options.run_java_tests: |
+ total_failed += instrumentation_dispatch.Dispatch(options) |
+ elif options.test_type == "uiautomatortests": |
+ ValidateUIAutomatorOptions(options, option_parser) |
+ if options.run_python_tests: |
+ total_failed += python_dispatch.Dispatch(options) |
+ if options.run_java_tests: |
+ total_failed += uiautomator_dispatch.Dispatch(options) |
+ else: |
+ raise Exception('Unknown test type state') |
+ |
+ return total_failed |
+ |
+ |
+def main(argv): |
+ option_parser = optparse.OptionParser( |
+ usage="Usage: %prog test_type [options]") |
+ AddCommonOptions(option_parser) |
+ AddGTestTestOptions(option_parser) |
+ AddJavaTestOptions(option_parser) |
+ options, args = option_parser.parse_args(argv) |
+ |
+ ValidateTestTypeArg(options, args, option_parser) |
+ ProcessCommonOptions(options) |
+ ProcessDeviceOptions(options) |
+ |
+ failed_tests_count = RunTests(options, option_parser) |
+ |
+ # Failures of individual test suites are communicated by printing a |
+ # STEP_FAILURE message. |
+ # Returning a success exit status also prevents the buildbot from incorrectly |
+ # marking the last suite as failed if there were failures in other suites in |
+ # the batch (this happens because the exit status is a sum of all failures |
+ # from all suites, but the buildbot associates the exit status only with the |
+ # most recent step). |
+ if options.exit_code: |
+ return failed_tests_count |
+ return 0 |
+ |
+ |
+if __name__ == '__main__': |
+ sys.exit(main(sys.argv)) |