#!/usr/bin/python
# Copyright (c) 2011 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

"""Runs all the native unit tests.

1. Copy over test binary to /data/local on device.
2. Resources: chrome/unit_tests requires resources (chrome.pak and en-US.pak)
   to be deployed to the device (in /data/local/tmp).
3. Environment:
3.1. chrome/unit_tests requires (via chrome_paths.cc) a directory named:
     /data/local/tmp/chrome/test/data
3.2. page_cycler_tests have the following requirements:
3.2.1. the following data on the host:
       <chrome_src_dir>/tools/page_cycler
       <chrome_src_dir>/data/page_cycler
3.2.2. two data directories on the device to store the above test data, named:
       /data/local/tmp/tools/ (for the database perf test)
       /data/local/tmp/data/ (for the other perf tests)
3.2.3. an http server to serve the http perf tests.
       The http root is the host's <chrome_src_dir>/data/page_cycler/, port 8000.
3.2.4. a tool named forwarder running on the device to forward the http
       requests/responses between host and device.
3.2.5. Chrome installed on the device.
4. Run the binary on the device and stream the log to the host.
4.1. Optionally, filter specific tests.
4.2. Optionally, rebaseline: run the available tests and update the
     suppressions file for failures.
4.3. If we're running a single test suite and have multiple devices
     connected, we'll shard the tests.
5. Clean up the device.

Suppressions:

Individual tests in a test binary can be suppressed by listing them in
the gtest_filter directory, in a file with the same name as the test binary,
one test per line. Here is an example:

  $ cat gtest_filter/base_unittests_disabled
  DataPackTest.Load
  ReadOnlyFileUtilTest.ContentsEqual

This file is generated by the tests running on devices. If running on an
emulator, an additional filter file, which lists the tests that fail only on
the emulator, is also loaded. We don't care about the rare test cases which
succeed on the emulator but fail on a device.
"""
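# Example invocations (a sketch; the script file name used below is an
# assumption, substitute whatever name this file is checked in under):
#   run_tests.py -s out/Release/base_unittests   # run one suite on a device
#   run_tests.py -s help                         # list the known suites
#   run_tests.py -e -r                           # rebaseline on a fresh emulator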

import logging
import os
import re
import sys

import android_commands
import cmd_helper
import debug_info
import emulator
import run_tests_helper
from single_test_runner import SingleTestRunner
from test_package_executable import TestPackageExecutable
from test_result import BaseTestResult, TestResults

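# Test suites run when no -s flag is given. RunTests() expands these bare
# names to absolute paths under out/Release.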
_TEST_SUITES = ['base_unittests',]

def RunTests(device, test_suite, gtest_filter, test_arguments, rebaseline,
             timeout, performance_test, cleanup_test_files, tool,
             log_dump_name):
  """Runs the tests.

  Args:
    device: Device to run the tests.
    test_suite: A specific test suite to run, empty to run all.
    gtest_filter: A gtest_filter flag.
    test_arguments: Additional arguments to pass to the test binary.
    rebaseline: Whether or not to run tests in isolation and update the filter.
    timeout: Timeout for each test.
    performance_test: Whether or not this is a performance test.
    cleanup_test_files: Whether or not to cleanup test files on device.
    tool: Name of the Valgrind tool.
    log_dump_name: Name of the log dump file.

  Returns:
    A TestResults object.
  """
  results = []

  if test_suite:
    global _TEST_SUITES
    if not os.path.exists(test_suite):
      logging.critical('Unrecognized test suite, supported: %s' %
                       _TEST_SUITES)
      if test_suite in _TEST_SUITES:
        logging.critical('(Remember to include the path: out/Release/%s)',
                         test_suite)
      return TestResults.FromOkAndFailed([], [BaseTestResult(test_suite, '')])
    _TEST_SUITES = [test_suite]
  else:
    # If not specified, assume the test suites are in out/Release.
    test_suite_dir = os.path.abspath(os.path.join(run_tests_helper.CHROME_DIR,
                                                  'out', 'Release'))
    _TEST_SUITES = [os.path.join(test_suite_dir, t) for t in _TEST_SUITES]
  debug_info_list = []
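  # Run each suite sequentially on this device, collecting its results and
  # any debug info dump so they can be zipped up below.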
  for t in _TEST_SUITES:
    test = SingleTestRunner(device, t, gtest_filter, test_arguments,
                            timeout, rebaseline, performance_test,
                            cleanup_test_files, tool, not not log_dump_name)
    test.RunTests()
    results += [test.test_results]
    # Collect debug info.
    debug_info_list += [test.dump_debug_info]
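    # In rebaseline mode, update the suite's filter file with the failures;
    # otherwise stop at the first suite that reports failures.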
    if rebaseline:
      test.UpdateFilter(test.test_results.failed)
    elif test.test_results.failed:
      # Stop running tests once a test has failed.
      test.test_results.LogFull()
      break
  # Zip all debug info outputs into a file named by log_dump_name.
  debug_info.GTestDebugInfo.ZipAndCleanResults(
      os.path.join(run_tests_helper.CHROME_DIR, 'out', 'Release',
                   'debug_info_dumps'),
      log_dump_name, [d for d in debug_info_list if d])
  return TestResults.FromTestResults(results)

def Dispatch(options):
  """Dispatches the tests, sharding if possible.

  If options.use_emulator is True, all tests will be run in a new emulator
  instance.

  Args:
    options: options for running the tests.

  Returns:
    0 if successful, number of failing tests otherwise.
  """
  if options.test_suite == 'help':
    ListTestSuites()
    return 0
  buildbot_emulator = None
  attached_devices = []

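  # Either launch a new emulator instance to test on, or use whatever devices
  # are already attached.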
  if options.use_emulator:
    buildbot_emulator = emulator.Emulator()
    buildbot_emulator.Launch()
    attached_devices.append(buildbot_emulator.device)
  else:
    attached_devices = android_commands.GetAttachedDevices()

  if not attached_devices:
    logging.critical('A device must be attached and online.')
    return 1

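  # Run the tests on the first attached device (or the newly launched
  # emulator).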
  test_results = RunTests(attached_devices[0], options.test_suite,
                          options.gtest_filter, options.test_arguments,
                          options.rebaseline, options.timeout,
                          options.performance_test,
                          options.cleanup_test_files, options.tool,
                          options.log_dump)
  if buildbot_emulator:
    buildbot_emulator.Shutdown()
  return len(test_results.failed)

def ListTestSuites():
  """Displays the list of available test suites."""
  print 'Available test suites are:'
  for test_suite in _TEST_SUITES:
    print test_suite


def main(argv):
  option_parser = run_tests_helper.CreateTestRunnerOptionParser(None,
      default_timeout=0)
  option_parser.add_option('-s', dest='test_suite',
                           help='Executable name of the test suite to run '
                                '(use -s help to list them)')
  option_parser.add_option('-r', dest='rebaseline',
                           help='Rebaseline and update *testsuite_disabled',
                           action='store_true',
                           default=False)
  option_parser.add_option('-f', dest='gtest_filter',
                           help='gtest filter')
  option_parser.add_option('-a', '--test_arguments', dest='test_arguments',
                           help='Additional arguments to pass to the test')
  option_parser.add_option('-p', dest='performance_test',
                           help='Indicator of performance test',
                           action='store_true',
                           default=False)
  option_parser.add_option('-L', dest='log_dump',
                           help='File name of the log dump, which will be put '
                                'in the subfolder debug_info_dumps under the '
                                'same directory where the test_suite exists.')
  option_parser.add_option('-e', '--emulator', dest='use_emulator',
                           help='Run tests in a new instance of the emulator',
                           action='store_true',
                           default=False)
  options, args = option_parser.parse_args(argv)
  if len(args) > 1:
    print 'Unknown argument:', args[1:]
    option_parser.print_usage()
    sys.exit(1)
  run_tests_helper.SetLogLevel(options.verbose_count)
  return Dispatch(options)


if __name__ == '__main__':
  sys.exit(main(sys.argv))