#!/usr/bin/python
# Copyright (c) 2011 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

"""Runs all the native unit tests.

1. Copy over test binary to /data/local on device.
2. Resources: chrome/unit_tests requires resources (chrome.pak and en-US.pak)
   to be deployed to the device (in /data/local/tmp).
3. Environment:
3.1. chrome/unit_tests requires (via chrome_paths.cc) a directory named:
     /data/local/tmp/chrome/test/data
3.2. page_cycler_tests have the following requirements:
3.2.1. the following data on the host:
       <chrome_src_dir>/tools/page_cycler
       <chrome_src_dir>/data/page_cycler
3.2.2. two data directories on the device to store the above test data, named:
       /data/local/tmp/tools/ (for the database perf test)
       /data/local/tmp/data/ (for other perf tests)
3.2.3. an HTTP server to serve HTTP perf tests.
       The HTTP root is the host's <chrome_src_dir>/data/page_cycler/,
       port 8000.
3.2.4. a tool named forwarder running on the device to forward HTTP
       requests/responses between the host and the device.
3.2.5. Chrome installed on the device.
4. Run the binary on the device and stream the log to the host.
4.1. Optionally, filter specific tests.
4.2. Optionally, rebaseline: run the available tests and update the
     suppressions file for failures.
4.3. If we're running a single test suite and have multiple devices
     connected, shard the tests across them.
5. Clean up the device.

Suppressions:

Individual tests in a test binary can be suppressed by listing them in
a file in the gtest_filter directory named after the test binary, one
test per line. For example:

  $ cat gtest_filter/base_unittests_disabled
  DataPackTest.Load
  ReadOnlyFileUtilTest.ContentsEqual

This file is generated by the tests running on devices. When running on an
emulator, an additional filter file, listing the tests that fail only on the
emulator, is loaded as well. We don't care about the rare test cases that
succeed on the emulator but fail on a device.
"""
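
# Example invocations (illustrative; the script name, suite path and test
# filter are placeholders; see main() below for the option definitions):
#   run_tests.py -s out/Release/base_unittests         # one suite on a device
#   run_tests.py -s out/Release/base_unittests -f 'DataPackTest.*'
#   run_tests.py -e -s out/Release/base_unittests      # in a fresh emulator
#   run_tests.py -r -s out/Release/base_unittests      # rebaseline suppressions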

import logging
import multiprocessing
import os
import re
import subprocess
import sys

import android_commands
from base_test_sharder import *
Nirnimesh (2011/10/21 08:16:15): do not import *
michaelbai (2011/10/21 21:08:41): Done.
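Given the names this file actually uses, the explicit equivalents of the
starred imports would presumably be as follows (a sketch; which module
defines which name is an assumption, and the revised patchset is not shown):

  from base_test_sharder import BaseTestSharder
  from run_tests_helper import (CHROME_DIR, CreateTestRunnerOptionParser,
                                SetLogLevel)
  from single_test_runner import SingleTestRunner
  from test_result import BaseTestResult, TestResults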
import cmd_helper
import debug_info
import emulator
from run_tests_helper import *
from single_test_runner import *
from test_package_executable import TestPackageExecutable
from test_result import *

_TEST_SUITES = ['base_unittests',
                ]
Nirnimesh (2011/10/21 08:16:15): align under _
michaelbai (2011/10/21 21:08:41): Done.

def RunTests(device, test_suite, gtest_filter, test_arguments, rebaseline,
             timeout, performance_test, cleanup_test_files, tool,
             log_dump_name):
  """Runs the tests.

  Args:
    device: Device to run the tests on.
    test_suite: A specific test suite to run, empty to run all.
    gtest_filter: A gtest_filter flag.
    test_arguments: Additional arguments to pass to the test binary.
    rebaseline: Whether or not to run tests in isolation and update the filter.
    timeout: Timeout for each test.
    performance_test: Whether or not the tests are performance tests.
    cleanup_test_files: Whether or not to clean up test files on the device.
    tool: Name of the Valgrind tool.
    log_dump_name: Name of the log dump file.

  Returns:
    A TestResults object.
  """
  results = []

  if test_suite:
    global _TEST_SUITES
    if not os.path.exists(test_suite):
      logging.critical('Unrecognized test suite, supported: %s' %
                       _TEST_SUITES)
      if test_suite in _TEST_SUITES:
        logging.critical('(Remember to include the path: out/Release/%s)',
                         test_suite)
      return TestResults.FromOkAndFailed([], [BaseTestResult(test_suite, '')])
    _TEST_SUITES = [test_suite]
  else:
    # If not specified, assume the test suites are in out/Release.
    test_suite_dir = os.path.abspath(os.path.join(CHROME_DIR, 'out', 'Release'))
    _TEST_SUITES = [os.path.join(test_suite_dir, t) for t in _TEST_SUITES]
  debug_info_list = []
  for t in _TEST_SUITES:
    # not not log_dump_name coerces the name to a bool: debug info is dumped
    # only when a log dump file was requested.
    test = SingleTestRunner(device, t, gtest_filter, test_arguments,
                            timeout, rebaseline, performance_test,
                            cleanup_test_files, tool, not not log_dump_name)
    test.RunTests()
    results += [test.test_results]
    # Collect debug info.
    debug_info_list += [test.dump_debug_info]
    if rebaseline:
      test.UpdateFilter(test.test_results.failed)
    elif test.test_results.failed:
      # Stop running further suites once one has failures.
      test.test_results.LogFull()
      break
  # Zip all debug info outputs into a file named by log_dump_name.
  debug_info.GTestDebugInfo.ZipAndCleanResults(
      os.path.join(CHROME_DIR, 'out', 'Release', 'debug_info_dumps'),
      log_dump_name, [d for d in debug_info_list if d])
  return TestResults.FromTestResults(results)


class TestSharder(BaseTestSharder):
  """Responsible for sharding the tests on the connected devices."""

  def __init__(self, attached_devices, test_suite, gtest_filter,
               test_arguments, timeout, rebaseline, performance_test,
               cleanup_test_files, tool):
    BaseTestSharder.__init__(self, attached_devices)
    self.test_suite = test_suite
    self.test_suite_basename = os.path.basename(test_suite)
    self.gtest_filter = gtest_filter
    self.test_arguments = test_arguments
    self.timeout = timeout
    self.rebaseline = rebaseline
    self.performance_test = performance_test
    self.cleanup_test_files = cleanup_test_files
    self.tool = tool
    test = SingleTestRunner(self.attached_devices[0], test_suite, gtest_filter,
                            test_arguments, timeout, rebaseline,
                            performance_test, cleanup_test_files, tool)
    all_tests = set(test.test_package.GetAllTests())
    if not rebaseline:
      all_tests -= set(test.GetDisabledTests())
    self.tests = list(all_tests)

  def CreateShardedTestRunner(self, device, index):
    """Creates a suite-specific test runner.

    Args:
      device: Device serial where this shard will run.
      index: Index of this device in the pool.

    Returns:
      A SingleTestRunner object.
    """
    shard_size = len(self.tests) / len(self.attached_devices)
    begin = index * shard_size
    # Integer division truncates, so give any remainder to the last shard;
    # otherwise the trailing tests would never be assigned to a device.
    if index == len(self.attached_devices) - 1:
      end = len(self.tests)
    else:
      end = begin + shard_size
    shard_test_list = self.tests[begin:end]
    test_filter = ':'.join(shard_test_list)
    return SingleTestRunner(device, self.test_suite,
                            test_filter, self.test_arguments, self.timeout,
                            self.rebaseline, self.performance_test,
                            self.cleanup_test_files, self.tool)

  def OnTestsCompleted(self, test_runners, test_results):
    """Notifies that we completed the tests."""
    test_results.LogFull()
    if test_results.failed and self.rebaseline:
      test_runners[0].UpdateFilter(test_results.failed)


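# For context: base_test_sharder is not part of this change. From the
# overrides above and the RunShardedTests() call in Dispatch() below, its
# expected contract is roughly the following (purely illustrative, not the
# real implementation):
#
#   runners = [sharder.CreateShardedTestRunner(device, i)
#              for i, device in enumerate(sharder.attached_devices)]
#   # RunShardedTests() runs each shard, merges the TestResults, then calls:
#   sharder.OnTestsCompleted(runners, merged_test_results)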
def Dispatch(options):
  """Dispatches the tests, sharding if possible.

  If options.use_emulator is True, all tests will be run in a new emulator
  instance.

  Args:
    options: options for running the tests.

  Returns:
    0 if successful, the number of failed tests otherwise (1 if no device
    is attached).
  """
  if options.test_suite == 'help':
    ListTestSuites()
    return 0
  buildbot_emulator = None
  attached_devices = []

  if options.use_emulator:
    # Launch a fresh emulator and treat it as the only attached device.
    buildbot_emulator = emulator.Emulator()
    buildbot_emulator.Launch()
    attached_devices.append(buildbot_emulator.device)
  else:
    attached_devices = android_commands.GetAttachedDevices()

  if not attached_devices:
    logging.critical('A device must be attached and online.')
    return 1

  # Shard across devices only when a single suite is run without a gtest
  # filter and not as a performance test.
  if (len(attached_devices) > 1 and options.test_suite and
      not options.gtest_filter and not options.performance_test):
    sharder = TestSharder(attached_devices, options.test_suite,
                          options.gtest_filter, options.test_arguments,
                          options.timeout, options.rebaseline,
                          options.performance_test,
                          options.cleanup_test_files, options.tool)
    test_results = sharder.RunShardedTests()
  else:
    test_results = RunTests(attached_devices[0], options.test_suite,
                            options.gtest_filter, options.test_arguments,
                            options.rebaseline, options.timeout,
                            options.performance_test,
                            options.cleanup_test_files, options.tool,
                            options.log_dump)
  if buildbot_emulator:
    buildbot_emulator.Shutdown()
  return len(test_results.failed)

def ListTestSuites():
  """Displays the list of available test suites."""
  print 'Available test suites are:'
  for test_suite in _TEST_SUITES:
    print test_suite

def main(argv):
  option_parser = CreateTestRunnerOptionParser(None, default_timeout=0)
  option_parser.add_option('-s', dest='test_suite',
                           help='Executable name of the test suite to run '
                           '(use -s help to list them)')
  option_parser.add_option('-r', dest='rebaseline',
                           help='Rebaseline and update *testsuite_disabled',
                           action='store_true',
                           default=False)
  option_parser.add_option('-f', dest='gtest_filter',
                           help='gtest filter')
  option_parser.add_option('-a', '--test_arguments', dest='test_arguments',
                           help='Additional arguments to pass to the test')
  option_parser.add_option('-p', dest='performance_test',
                           help='Run the tests as performance tests',
                           action='store_true',
                           default=False)
  option_parser.add_option('-L', dest='log_dump',
                           help='File name of the log dump, which will be put '
                           'in the subfolder debug_info_dumps under the same '
                           'directory where the test_suite exists.')
  option_parser.add_option('-e', '--emulator', dest='use_emulator',
                           help='Run tests in a new instance of the emulator',
                           action='store_true',
                           default=False)
  options, args = option_parser.parse_args(argv)
  if len(args) > 1:
    print 'Unknown argument:', args[1:]
    option_parser.print_usage()
    sys.exit(1)
  SetLogLevel(options.verbose_count)
  return Dispatch(options)

Nirnimesh (2011/10/21 08:16:15): nit: need another blank line here
michaelbai (2011/10/21 21:08:41): Done.
if __name__ == '__main__':
  sys.exit(main(sys.argv))
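
A note on exit codes, following Dispatch() above: the process exit status is
the number of failed tests (0 on success), so a shell caller can rely on
ordinary success/failure semantics (the script path below is a placeholder):

  ./run_tests.py -s out/Release/base_unittests && echo 'all tests passed'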