Chromium Code Reviews

| Index: build/android/run_all_tests.py |
| diff --git a/build/android/run_all_tests.py b/build/android/run_all_tests.py |
| new file mode 100755 |
| index 0000000000000000000000000000000000000000..f0cdea858158a4bfe4967c3fc5da05c43d29e665 |
| --- /dev/null |
| +++ b/build/android/run_all_tests.py |
| @@ -0,0 +1,462 @@ |
| +#!/usr/bin/env python |
| + |
| +"""Runs all types of tests from one unified interface. |
| + |
| +Types of tests supported: |
| +1. GTest native unit tests (--gtest) |
| + Example: ./run_all_tests.py --gtest --release |
|
frankf
2013/06/11 01:51:47
Don't include --release
Specify the suite name
gkanwar
2013/06/12 01:27:32
Done.
|
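For reference, an invocation incorporating the reviewer's suggestion would drop --release and name a specific suite via the -s/--suite flag defined later in this file (base_unittests is an illustrative suite name, not part of this change):

    ./run_all_tests.py --gtest -s base_unittests
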
| +2. ContentBrowser unit tests (--browser) |
| + Example: ./adb_install_apk.py --apk=ContentShell.apk --release |
| + ./run_all_tests.py --browser --test-apk=ContentShellTest --release |
|
frankf
2013/06/11 01:51:47
browser -> content_browsertests
gkanwar
2013/06/12 01:27:32
Done.
|
| +3. Instrumentation tests (--instrumentation) |
|
frankf
2013/06/11 01:51:47
Separate instrumentation into 1. Instrumentation 2
gkanwar
2013/06/12 01:27:32
Done.
|
| +3a. Python host-driven (runs by default, use --java_only to exclude these |
| + tests, use --python_only to run only these tests; you must still specify |
| + at least one of --test-apk or --test-jar) |
| + Example: ./run_all_tests.py --instrumentation --python_only --release |
| + --test-apk=ChromiumTestShellTest |
| + --python_test_root=src/chrome/android/ |
| +3b. Java UI-Automator (runs if --test-jar is specified) |
| + Example: ./run_all_tests.py --instrumentation --java_only |
| + --test-jar=chromium_testshell_uiautomator_tests |
| + --package-name=org.chromium.chrome.testshell --release |
| +3c. Java non-UI-Automator (runs if --test-apk is specified) |
| + Example: ./run_all_tests.py --instrumentation --java_only |
| + --test-apk=ChromiumTestShellTest --release |
| + |
| +TODO(gkanwar): |
| +* Incorporate the functionality of adb_install_apk.py to allow |
| + installing the APK for a test in the same command as running the test. |
| +* Add options to run Monkey tests. |
| +* Allow running many test types non-exclusively from the same command. |
| +""" |
| + |
| +import optparse |
| +import os |
| +import sys |
| + |
| +from pylib import cmd_helper |
| +from pylib import constants |
| +from pylib import ports |
| +from pylib.browsertests import dispatch as browsertests_dispatch |
| +from pylib.gtest import dispatch as gtest_dispatch |
| +from pylib.host_driven import run_python_tests as python_dispatch |
| +from pylib.instrumentation import dispatch as instrumentation_dispatch |
| +from pylib.uiautomator import dispatch as uiautomator_dispatch |
| +from pylib.utils import emulator |
| +from pylib.utils import run_tests_helper |
| + |
| +_SDK_OUT_DIR = os.path.join(constants.DIR_SOURCE_ROOT, 'out') |
| + |
| + |
| +def AddTestTypeOption(option_parser): |
|
frankf
2013/06/11 02:50:15
You need to remove utils/test_option_...
gkanwar
2013/06/12 01:27:32
Done.
|
| + """Decorates OptionParser with test type options in an OptionGroup.""" |
| + |
| + option_group = optparse.OptionGroup(option_parser, 'Test Type Options', |
| + 'Select one of the test types ' |
| + 'below to run.') |
| + |
| + # TODO(gkanwar): Support running multiple test types from |
| + # one command. |
| + option_group.add_option('--gtest', action='store_true', |
| + dest='gtest_only', default=False, |
| + help='If set, run GTest unit tests. See the ' |
| + 'GTest flags for more detail on options.') |
| + option_group.add_option('--browser', action='store_true', |
| + dest='browser_only', default=False, |
| + help='If set, run contentbrowser unit tests. ' |
| + 'See the contentbrowser flags for more detail ' |
| + 'on options.') |
| + option_group.add_option('--instrumentation', action='store_true', |
| + dest='instrumentation_only', default=False, |
| + help='If set, run java/python instrumentation ' |
| + 'tests (both UI Automator and not, depending ' |
| + 'on whether you pass a UIA JAR or a non-UIA ' |
| + 'APK). See the instrumentation flags for more ' |
| + 'detail on options.') |
| + |
| + option_parser.add_option_group(option_group) |
| + |
| + |
| +def ValidateTestTypeOption(options, option_parser): |
| + """Validates that exactly one of the test type options are set.""" |
| + if sum([options.gtest_only, options.browser_only, |
| + options.instrumentation_only]) != 1: |
| + option_parser.error('Exactly one of the test type flags must be set') |
| + |
| + |
| +def AddBuildTypeOption(option_container): |
| + """Decorates OptionContainer with build type option.""" |
| + default_build_type = 'Debug' |
| + if 'BUILDTYPE' in os.environ: |
| + default_build_type = os.environ['BUILDTYPE'] |
| + option_container.add_option('--debug', action='store_const', const='Debug', |
| + dest='build_type', default=default_build_type, |
| + help='If set, run test suites under out/Debug. ' |
| + 'Default is env var BUILDTYPE or Debug.') |
| + option_container.add_option('--release', action='store_const', |
| + const='Release', dest='build_type', |
| + help='If set, run test suites under out/Release. ' |
| + 'Default is env var BUILDTYPE or Debug.') |
| + |
| + |
| +def AddDeviceOptions(option_container): |
| + """Decorates OptionContainer with all device-related options.""" |
| + |
| + option_container.add_option('-d', '--device', dest='test_device', |
| + help='Target device for the test suite ' |
| + 'to run on.') |
| + option_container.add_option('-e', '--emulator', dest='use_emulator', |
| + action='store_true', |
| + help='Run tests in a new instance of emulator.') |
| + option_container.add_option('-n', '--emulator-count', |
| + type='int', default=1, |
| + help='Number of emulators to launch for ' |
| + 'running the tests.') |
| + option_container.add_option('--abi', default='armeabi-v7a', |
| + help='Platform of emulators to launch.') |
| + |
| + |
| +def ProcessDeviceOptions(options): |
| + """Processes emulator and device options.""" |
| + if options.use_emulator: |
| + emulator.DeleteAllTempAVDs() |
| + |
| + |
| +def AddCommonOptions(option_parser, default_timeout=60): |
| + """Decorates OptionParser with all common options in an OptionGroup.""" |
| + |
| + option_group = optparse.OptionGroup(option_parser, "Common Options", |
| + "Options that apply to all test types.") |
| + |
| + AddBuildTypeOption(option_group) |
| + AddDeviceOptions(option_group) |
| + |
| + # --gtest_filter is DEPRECATED. Added for backwards compatibility |
| + # with the syntax of the old run_tests.py script. |
| + option_group.add_option('-f', '--test_filter', '--gtest_filter', |
| + dest='test_filter', |
| + help='Test filter (if not fully qualified, ' |
| + 'will run all matches).') |
| + option_group.add_option('--out-directory', dest='out_directory', |
| + help='Path to the out/ directory, irrespective of ' |
| + 'the build type. Only for non-Chromium uses.') |
| + option_group.add_option('-t', dest='timeout', |
| + help='Timeout to wait for each test', |
| + type='int', |
| + default=default_timeout) |
| + option_group.add_option('-c', dest='cleanup_test_files', |
| + help='Cleanup test files on the device after run', |
| + action='store_true') |
| + option_group.add_option('--num_retries', dest='num_retries', type='int', |
| + default=2, |
| + help='Number of retries for a test before ' |
| + 'giving up.') |
| + option_group.add_option('-v', |
| + '--verbose', |
| + dest='verbose_count', |
| + default=0, |
| + action='count', |
| + help='Verbose level (multiple times for more)') |
| + profilers = ['devicestatsmonitor', 'chrometrace', 'dumpheap', 'smaps', |
| + 'traceview'] |
| + option_group.add_option('--profiler', dest='profilers', action='append', |
| + choices=profilers, |
| + help='Profiling tool to run during test. ' |
| + 'Pass multiple times to run multiple profilers. ' |
| + 'Available profilers: %s' % profilers) |
| + option_group.add_option('--tool', |
| + dest='tool', |
| + help='Run the test under a tool ' |
| + '(use --tool help to list them)') |
| + option_group.add_option('--flakiness-dashboard-server', |
| + dest='flakiness_dashboard_server', |
| + help=('Address of the server that is hosting the ' |
| + 'Chrome for Android flakiness dashboard.')) |
| + option_group.add_option('--exit-code', action='store_true', |
| + help='If set, the exit code will be total number ' |
| + 'of failures.') |
| + option_group.add_option('--buildbot-step-failure', |
| + action='store_true', |
| + help=('If present, will set the buildbot status ' |
| + 'as STEP_FAILURE, otherwise as STEP_WARNINGS ' |
| + 'when test(s) fail.')) |
| + option_parser.add_option_group(option_group) |
| + |
| + |
| +def ProcessCommonOptions(options): |
| + """Process and handle all common options.""" |
| + if options.out_directory: |
| + cmd_helper.OutDirectory.set(options.out_directory) |
| + run_tests_helper.SetLogLevel(options.verbose_count) |
| + |
| + |
| +def AddUnitTestOptions(option_parser): |
|
frankf
2013/06/11 01:51:47
I would change this back to GTest. We care about t
gkanwar
2013/06/12 01:27:32
This set of options is common between both Gtests
|
| + """Decorates OptionParser with gtest/browsertest options in an OptionGroup.""" |
| + |
| + option_group = optparse.OptionGroup(option_parser, |
| + 'GTest/BrowserTest Options', |
| + 'Use these options to choose which ' |
| + 'test suites to run and how.') |
| + |
| + option_group.add_option('-s', '--suite', dest='test_suite', |
| + help='Executable name of the test suite to run ' |
| + '(use -s help to list them).') |
| + option_group.add_option('-a', '--test_arguments', dest='test_arguments', |
| + help='Additional arguments to pass to the test.') |
| + option_group.add_option('-x', '--xvfb', dest='use_xvfb', |
| + action='store_true', |
| + help='Use Xvfb around tests (ignored if not Linux).') |
| + option_group.add_option('--webkit', action='store_true', |
| + help='Run the tests from a WebKit checkout.') |
| + option_group.add_option('--exe', action='store_true', |
| + help='If set, use the exe test runner instead of ' |
| + 'the APK.') |
| + |
| + option_parser.add_option_group(option_group) |
| + |
| + |
| +def AddInstrumentationOptions(option_parser): |
| + """Decorates OptionParser with Instrumentation/UIAutomator test options.""" |
| + |
| + option_group = optparse.OptionGroup(option_parser, |
| + 'InstrumentationTest Options', |
| + 'Options for Java/Python/UIAutomator ' |
| + 'instrumentation tests.') |
| + option_group.add_option( |
| + '-A', '--annotation', dest='annotation_str', |
| + help=('Comma-separated list of annotations. Run only tests with any of ' |
| + 'the given annotations. An annotation can be either a key or a ' |
| + 'key-values pair. A test that has no annotation is considered ' |
| + '"SmallTest".')) |
| + option_group.add_option( |
| + '-E', '--exclude-annotation', dest='exclude_annotation_str', |
| + help=('Comma-separated list of annotations. Exclude tests with these ' |
| + 'annotations.')) |
| + option_group.add_option('-j', '--java_only', action='store_true', |
| + default=False, help='Run only the Java tests.') |
| + option_group.add_option('-p', '--python_only', action='store_true', |
| + default=False, help='Run only the Python tests.') |
| + option_group.add_option('--screenshot', dest='screenshot_failures', |
| + action='store_true', |
| + help='Capture screenshots of test failures') |
| + option_group.add_option('--save-perf-json', action='store_true', |
| + help='Saves the JSON file for each UI Perf test.') |
| + option_group.add_option('--shard_retries', type=int, default=1, |
| + help=('Number of times to retry each failure when ' |
| + 'sharding.')) |
| + option_group.add_option('--official-build', help='Run official build tests.') |
| + option_group.add_option('--python_test_root', |
| + help='Root of the python-driven tests.') |
| + option_group.add_option('--keep_test_server_ports', |
| + action='store_true', |
| + help='Indicates the test server ports must be ' |
| + 'kept. When this is run via a sharder ' |
| + 'the test server ports should be kept and ' |
| + 'should not be reset.') |
| + option_group.add_option('--disable_assertions', action='store_true', |
| + help='Run with java assertions disabled.') |
| + option_group.add_option('--test_data', action='append', default=[], |
| + help=('Each instance defines a directory of test ' |
| + 'data that should be copied to the target(s) ' |
| + 'before running the tests. The argument ' |
| + 'should be of the form <target>:<source>, ' |
| + '<target> is relative to the device data ' |
| + 'directory, and <source> is relative to the ' |
| + 'chromium build directory.')) |
| + |
| + AddNonUIAutomatorOptions(option_group) |
| + AddUIAutomatorOptions(option_group) |
| + |
| + option_parser.add_option_group(option_group) |
| + |
| + |
| +def ValidateInstrumentationOptions(options, option_parser): |
| + """Validate options/arguments and populate options with defaults.""" |
| + |
| + # Common options |
| + if options.java_only and options.python_only: |
| + option_parser.error('Options java_only (-j) and python_only (-p) ' |
| + 'are mutually exclusive.') |
| + options.run_java_tests = True |
| + options.run_python_tests = True |
| + if options.java_only: |
| + options.run_python_tests = False |
| + elif options.python_only: |
| + options.run_java_tests = False |
| + |
| + if options.run_python_tests and not options.python_test_root: |
| + option_parser.error('You must specify --python_test_root when ' |
| + 'running Python tests.') |
| + |
| + if options.annotation_str: |
| + options.annotations = options.annotation_str.split(',') |
| + elif options.test_filter: |
| + options.annotations = [] |
| + else: |
| + options.annotations = ['Smoke', 'SmallTest', 'MediumTest', 'LargeTest'] |
| + |
| + if options.exclude_annotation_str: |
| + options.exclude_annotations = options.exclude_annotation_str.split(',') |
| + else: |
| + options.exclude_annotations = [] |
| + |
| + if not options.keep_test_server_ports: |
| + if not ports.ResetTestServerPortAllocation(): |
| + raise Exception('Failed to reset test server port.') |
| + |
| + # Validate only one of the UIAutomator or non-UIAutomator options, |
| + # determined based on the --test-apk or --test-jar flag. |
| + options.run_uiautomator_tests = False |
| + options.run_nonuiautomator_tests = False |
| + if not (options.test_apk or options.test_jar): |
| + option_parser.error('You must specify at least one of --test-apk ' |
| + '(for non-UIAutomator tests) and --test-jar ' |
| + '(for UIAutomator tests).') |
| + if options.test_apk: # Assume that having an APK means non-UIAutomator |
| + options.run_nonuiautomator_tests = True |
| + ValidateNonUIAutomatorOptions(options, option_parser) |
| + if options.test_jar: # Assume that having a JAR means UIAutomator |
| + options.run_uiautomator_tests = True |
| + ValidateUIAutomatorOptions(options, option_parser) |
| + |
| + |
| +def AddNonUIAutomatorOptions(option_container): |
| + """Add java/python instrumentation test options.""" |
| + option_container.add_option('-w', '--wait_debugger', dest='wait_for_debugger', |
| + action='store_true', |
| + help='(non-UIAutomator only) Wait for debugger.') |
| + option_container.add_option('-I', dest='install_apk', action='store_true', |
| + help='(non-UIAutomator only) Install APK.') |
| + option_container.add_option( |
| + '--test-apk', dest='test_apk', |
| + help=('(non-UIAutomator only) The name of the apk containing the tests ' |
| + '(without the .apk extension; e.g. "ContentShellTest"). ' |
| + 'Alternatively, this can be a full path to the apk.')) |
| + |
| + |
| +def ValidateNonUIAutomatorOptions(options, option_parser): |
| + """Validate options/arguments and populate options with defaults.""" |
| + |
| + if not options.test_apk: |
| + option_parser.error('--test-apk must be specified.') |
| + |
| + if os.path.exists(options.test_apk): |
| + # The APK is fully qualified, assume the JAR lives alongside. |
| + options.test_apk_path = options.test_apk |
| + options.test_apk_jar_path = (os.path.splitext(options.test_apk_path)[0] + |
| + '.jar') |
| + else: |
| + options.test_apk_path = os.path.join(_SDK_OUT_DIR, |
| + options.build_type, |
| + constants.SDK_BUILD_APKS_DIR, |
| + '%s.apk' % options.test_apk) |
| + options.test_apk_jar_path = os.path.join( |
| + _SDK_OUT_DIR, options.build_type, constants.SDK_BUILD_TEST_JAVALIB_DIR, |
| + '%s.jar' % options.test_apk) |
| + |
| + |
| +def AddUIAutomatorOptions(option_container): |
| + """Add UI Automator test options.""" |
| + option_container.add_option( |
| + '--package-name', |
| + help=('(UIAutomator only) The package name used by the apk ' |
| + 'containing the application.')) |
| + option_container.add_option( |
| + '--test-jar', dest='test_jar', |
| + help=('(UIAutomator only) The name of the dexed jar containing the tests ' |
| + '(without the .dex.jar extension). Alternatively, this can be a ' |
| + 'full path to the jar.')) |
| + |
| + |
| +def ValidateUIAutomatorOptions(options, option_parser): |
| + """Validate UIAutomator options/arguments.""" |
| + |
| + if not options.package_name: |
| + option_parser.error('--package-name must be specified.') |
| + |
| + if not options.test_jar: |
| + option_parser.error('--test-jar must be specified.') |
| + |
| + if os.path.exists(options.test_jar): |
| + # The dexed JAR is fully qualified, assume the info JAR lives alongside. |
| + options.uiautomator_jar = options.test_jar |
| + else: |
| + options.uiautomator_jar = os.path.join( |
| + _SDK_OUT_DIR, options.build_type, constants.SDK_BUILD_JAVALIB_DIR, |
| + '%s.dex.jar' % options.test_jar) |
| + options.uiautomator_info_jar = ( |
| + options.uiautomator_jar[:options.uiautomator_jar.find('.dex.jar')] + |
| + '_java.jar') |
| + |
| + |
| +def AddAllOptions(option_parser): |
|
frankf
2013/06/11 02:50:15
Just inline this.
gkanwar
2013/06/12 01:27:32
Done.
|
| + """Decorates the OptionParser with all options.""" |
| + |
| + AddTestTypeOption(option_parser) |
| + AddCommonOptions(option_parser) |
| + AddUnitTestOptions(option_parser) |
| + AddInstrumentationOptions(option_parser) |
| + |
| +def RunGTests(options, option_parser): |
| + """Runs GTests using the given options.""" |
| + return gtest_dispatch.Dispatch(options) |
| + |
| +def RunBrowserTests(options, option_parser): |
|
frankf
2013/06/11 02:50:15
Also inline these.
gkanwar
2013/06/12 01:27:32
Done.
|
| + """Runs ContentBrowser unit tests using the given options.""" |
| + return browsertests_dispatch.Dispatch(options) |
| + |
| +def RunInstrumentationTests(options, option_parser): |
| + """Runs Java/Python instrumentation tests.""" |
| + ValidateInstrumentationOptions(options, option_parser) |
| + |
| + total_failed = 0 |
| + if options.run_python_tests: |
| + ValidateInstrumentationOptions(options, option_parser) |
|
frankf
2013/06/11 02:50:15
Why is this duplicated?
gkanwar
2013/06/12 01:27:32
Done.
|
| + total_failed += python_dispatch.Dispatch(options) |
| + if options.run_java_tests: |
| + if options.run_uiautomator_tests: |
| + total_failed += uiautomator_dispatch.Dispatch(options) |
| + if options.run_nonuiautomator_tests: |
| + total_failed += instrumentation_dispatch.Dispatch(options) |
| + |
| + return total_failed |
| + |
| +def RunTests(options, option_parser): |
| + """Checks test type and dispatches to the appropriate function.""" |
| + |
| + if options.gtest_only: |
| + RunGTests(options, option_parser) |
| + elif options.browser_only: |
| + RunBrowserTests(options, option_parser) |
| + elif options.instrumentation_only: |
| + RunInstrumentationTests(options, option_parser) |
| + else: |
| + raise Exception('Unknown test type state') |
| + |
| + |
| +def main(argv): |
| + option_parser = optparse.OptionParser() |
| + AddAllOptions(option_parser) |
| + options, args = option_parser.parse_args(argv) |
| + |
| + ProcessCommonOptions(options) |
| + ProcessDeviceOptions(options) |
| + |
| + ValidateTestTypeOption(options, option_parser) |
| + |
| + failed_tests_count = RunTests(options, option_parser) |
|
frankf
2013/06/11 02:50:15
As it stands RunTests doesn't return anything.
gkanwar
2013/06/12 01:27:32
Fixed to return the total number of failed tests.
|
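Per the author's reply above, RunTests was revised to propagate the dispatchers' failure counts. A minimal sketch of that revision, using only the functions already defined in this file (the exact landed code may differ):

    def RunTests(options, option_parser):
      """Checks test type and dispatches to the appropriate function."""
      # Return the dispatcher's failure count so main() can report it.
      if options.gtest_only:
        return RunGTests(options, option_parser)
      elif options.browser_only:
        return RunBrowserTests(options, option_parser)
      elif options.instrumentation_only:
        return RunInstrumentationTests(options, option_parser)
      raise Exception('Unknown test type state')

With that change, failed_tests_count below receives the total number of failures rather than None.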
| + |
| + # Failures of individual test suites are communicated by printing a |
| + # STEP_FAILURE message. |
| + # Returning a success exit status also prevents the buildbot from incorrectly |
| + # marking the last suite as failed if there were failures in other suites in |
| + # the batch (this happens because the exit status is a sum of all failures |
| + # from all suites, but the buildbot associates the exit status only with the |
| + # most recent step). |
| + if options.exit_code: |
| + return failed_tests_count |
|
frankf
2013/06/11 02:50:15
Returning the total number of failed tests here do
gkanwar
2013/06/12 01:27:32
Sounds good. For now, I fixed the RunTests functio
|
| + return 0 |
| + |
| + |
| +if __name__ == '__main__': |
| + sys.exit(main(sys.argv)) |